You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

99 lines
3.5 KiB

  1. from __future__ import unicode_literals
  2. import re
  3. from .common import InfoExtractor
  4. from ..compat import compat_urlparse
  5. from ..utils import (
  6. int_or_none,
  7. js_to_json,
  8. mimetype2ext,
  9. ExtractorError,
  10. )
  11. class ImgurIE(InfoExtractor):
  12. _VALID_URL = r'https?://(?:i\.)?imgur\.com/(?P<id>[a-zA-Z0-9]+)'
  13. _TESTS = [{
  14. 'url': 'https://i.imgur.com/A61SaA1.gifv',
  15. 'info_dict': {
  16. 'id': 'A61SaA1',
  17. 'ext': 'mp4',
  18. 'title': 're:Imgur GIF$|MRW gifv is up and running without any bugs$',
  19. 'description': 're:The origin of the Internet\'s most viral images$|The Internet\'s visual storytelling community\. Explore, share, and discuss the best visual stories the Internet has to offer\.$',
  20. },
  21. }, {
  22. 'url': 'https://imgur.com/A61SaA1',
  23. 'info_dict': {
  24. 'id': 'A61SaA1',
  25. 'ext': 'mp4',
  26. 'title': 're:Imgur GIF$|MRW gifv is up and running without any bugs$',
  27. 'description': 're:The origin of the Internet\'s most viral images$|The Internet\'s visual storytelling community\. Explore, share, and discuss the best visual stories the Internet has to offer\.$',
  28. },
  29. }]
  30. def _real_extract(self, url):
  31. video_id = self._match_id(url)
  32. webpage = self._download_webpage(
  33. compat_urlparse.urljoin(url, video_id), video_id)
  34. width = int_or_none(self._search_regex(
  35. r'<param name="width" value="([0-9]+)"',
  36. webpage, 'width', fatal=False))
  37. height = int_or_none(self._search_regex(
  38. r'<param name="height" value="([0-9]+)"',
  39. webpage, 'height', fatal=False))
  40. video_elements = self._search_regex(
  41. r'(?s)<div class="video-elements">(.*?)</div>',
  42. webpage, 'video elements', default=None)
  43. if not video_elements:
  44. raise ExtractorError(
  45. 'No sources found for video %s. Maybe an image?' % video_id,
  46. expected=True)
  47. formats = []
  48. for m in re.finditer(r'<source\s+src="(?P<src>[^"]+)"\s+type="(?P<type>[^"]+)"', video_elements):
  49. formats.append({
  50. 'format_id': m.group('type').partition('/')[2],
  51. 'url': self._proto_relative_url(m.group('src')),
  52. 'ext': mimetype2ext(m.group('type')),
  53. 'acodec': 'none',
  54. 'width': width,
  55. 'height': height,
  56. 'http_headers': {
  57. 'User-Agent': 'youtube-dl (like wget)',
  58. },
  59. })
  60. gif_json = self._search_regex(
  61. r'(?s)var\s+videoItem\s*=\s*(\{.*?\})',
  62. webpage, 'GIF code', fatal=False)
  63. if gif_json:
  64. gifd = self._parse_json(
  65. gif_json, video_id, transform_source=js_to_json)
  66. formats.append({
  67. 'format_id': 'gif',
  68. 'preference': -10,
  69. 'width': width,
  70. 'height': height,
  71. 'ext': 'gif',
  72. 'acodec': 'none',
  73. 'vcodec': 'gif',
  74. 'container': 'gif',
  75. 'url': self._proto_relative_url(gifd['gifUrl']),
  76. 'filesize': gifd.get('size'),
  77. 'http_headers': {
  78. 'User-Agent': 'youtube-dl (like wget)',
  79. },
  80. })
  81. self._sort_formats(formats)
  82. return {
  83. 'id': video_id,
  84. 'formats': formats,
  85. 'description': self._og_search_description(webpage),
  86. 'title': self._og_search_title(webpage),
  87. }