# encoding: utf-8
from __future__ import unicode_literals

import json
import re
import itertools

from .common import InfoExtractor
from .subtitles import SubtitlesInfoExtractor
from ..compat import (
    compat_HTTPError,
    compat_urllib_parse,
    compat_urllib_request,
    compat_urlparse,
)
from ..utils import (
    ExtractorError,
    InAdvancePagedList,
    int_or_none,
    RegexNotFoundError,
    smuggle_url,
    std_headers,
    unsmuggle_url,
    urlencode_postdata,
)


class VimeoBaseInfoExtractor(InfoExtractor):
    _NETRC_MACHINE = 'vimeo'
    _LOGIN_REQUIRED = False

    def _login(self):
        (username, password) = self._get_login_info()
        if username is None:
            if self._LOGIN_REQUIRED:
                raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            return
        self.report_login()
        login_url = 'https://vimeo.com/log_in'
        webpage = self._download_webpage(login_url, None, False)
        # The xsrft token scraped from the login page has to be sent back
        # both in the POST data and as a cookie.
        token = self._search_regex(r'xsrft: \'(.*?)\'', webpage, 'login token')
        data = urlencode_postdata({
            'email': username,
            'password': password,
            'action': 'login',
            'service': 'vimeo',
            'token': token,
        })
        login_request = compat_urllib_request.Request(login_url, data)
        login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        login_request.add_header('Cookie', 'xsrft=%s' % token)
        self._download_webpage(login_request, None, False, 'Wrong login info')


class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
    """Information extractor for vimeo.com."""

    # _VALID_URL matches Vimeo URLs
    _VALID_URL = r'''(?x)
        https?://
        (?:(?:www|(?P<player>player))\.)?
        vimeo(?P<pro>pro)?\.com/
        (?!channels/[^/?#]+/?(?:$|[?#])|album/)
        (?:.*?/)?
        (?:(?:play_redirect_hls|moogaloop\.swf)\?clip_id=)?
        (?:videos?/)?
        (?P<id>[0-9]+)
        /?(?:[?&].*)?(?:[#].*)?$'''
    IE_NAME = 'vimeo'
    _TESTS = [
        {
            'url': 'http://vimeo.com/56015672#at=0',
            'md5': '8879b6cc097e987f02484baf890129e5',
            'info_dict': {
                'id': '56015672',
                'ext': 'mp4',
                "upload_date": "20121220",
                "description": "This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
                "uploader_id": "user7108434",
                "uploader": "Filippo Valsorda",
                "title": "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
                "duration": 10,
            },
        },
        {
            'url': 'http://vimeopro.com/openstreetmapus/state-of-the-map-us-2013/video/68093876',
            'md5': '3b5ca6aa22b60dfeeadf50b72e44ed82',
            'note': 'Vimeo Pro video (#1197)',
            'info_dict': {
                'id': '68093876',
                'ext': 'mp4',
                'uploader_id': 'openstreetmapus',
                'uploader': 'OpenStreetMap US',
                'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
                'description': 'md5:380943ec71b89736ff4bf27183233d09',
                'duration': 1595,
            },
        },
        {
            'url': 'http://player.vimeo.com/video/54469442',
            'md5': '619b811a4417aa4abe78dc653becf511',
            'note': 'Videos that embed the url in the player page',
            'info_dict': {
                'id': '54469442',
                'ext': 'mp4',
                'title': 'Kathy Sierra: Building the minimum Badass User, Business of Software 2012',
                'uploader': 'The BLN & Business of Software',
                'uploader_id': 'theblnbusinessofsoftware',
                'duration': 3610,
                'description': None,
            },
        },
        {
            'url': 'http://vimeo.com/68375962',
            'md5': 'aaf896bdb7ddd6476df50007a0ac0ae7',
            'note': 'Video protected with password',
            'info_dict': {
                'id': '68375962',
                'ext': 'mp4',
                'title': 'youtube-dl password protected test video',
                'upload_date': '20130614',
                'uploader_id': 'user18948128',
                'uploader': 'Jaime Marquínez Ferrándiz',
                'duration': 10,
                'description': 'This is "youtube-dl password protected test video" by Jaime Marquínez Ferrándiz on Vimeo, the home for high quality videos and the people who love them.',
            },
            'params': {
                'videopassword': 'youtube-dl',
            },
        },
        {
            'url': 'http://vimeo.com/channels/keypeele/75629013',
            'md5': '2f86a05afe9d7abc0b9126d229bbe15d',
            'note': 'Video is freely available via original URL '
                    'and protected with password when accessed via http://vimeo.com/75629013',
            'info_dict': {
                'id': '75629013',
                'ext': 'mp4',
                'title': 'Key & Peele: Terrorist Interrogation',
                'description': 'md5:8678b246399b070816b12313e8b4eb5c',
                'uploader_id': 'atencio',
                'uploader': 'Peter Atencio',
                'duration': 187,
            },
        },
        {
            'url': 'http://vimeo.com/76979871',
            'md5': '3363dd6ffebe3784d56f4132317fd446',
            'note': 'Video with subtitles',
            'info_dict': {
                'id': '76979871',
                'ext': 'mp4',
                'title': 'The New Vimeo Player (You Know, For Videos)',
                'description': 'md5:2ec900bf97c3f389378a96aee11260ea',
                'upload_date': '20131015',
                'uploader_id': 'staff',
                'uploader': 'Vimeo Staff',
                'duration': 62,
            }
        },
        {
            # from https://www.ouya.tv/game/Pier-Solar-and-the-Great-Architects/
            'url': 'https://player.vimeo.com/video/98044508',
            'note': 'The js code contains assignments to the same variable as the config',
            'info_dict': {
                'id': '98044508',
                'ext': 'mp4',
                'title': 'Pier Solar OUYA Official Trailer',
                'uploader': 'Tulio Gonçalves',
                'uploader_id': 'user28849593',
            },
        },
    ]

    def _verify_video_password(self, url, video_id, webpage):
        password = self._downloader.params.get('videopassword', None)
        if password is None:
            raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
        token = self._search_regex(r'xsrft: \'(.*?)\'', webpage, 'login token')
        data = compat_urllib_parse.urlencode({
            'password': password,
            'token': token,
        })
        # Password submission does not seem to work over https, so fall back to http
        if url.startswith('https'):
            pass_url = url.replace('https', 'http')
        else:
            pass_url = url
        password_request = compat_urllib_request.Request(pass_url + '/password', data)
        password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        password_request.add_header('Cookie', 'xsrft=%s' % token)
        return self._download_webpage(
            password_request, video_id,
            'Verifying the password', 'Wrong password')

    def _verify_player_video_password(self, url, video_id):
        password = self._downloader.params.get('videopassword', None)
        if password is None:
            raise ExtractorError('This video is protected by a password, use the --video-password option')
        data = compat_urllib_parse.urlencode({'password': password})
        pass_url = url + '/check-password'
        password_request = compat_urllib_request.Request(pass_url, data)
        password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        return self._download_json(
            password_request, video_id,
            'Verifying the password',
            'Wrong password')

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        url, data = unsmuggle_url(url)
        headers = std_headers
        if data is not None:
            headers = headers.copy()
            headers.update(data)
        if 'Referer' not in headers:
            headers['Referer'] = url

        # Extract ID from URL
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        orig_url = url
        if mobj.group('pro') or mobj.group('player'):
            url = 'http://player.vimeo.com/video/' + video_id

        # Retrieve video webpage to extract further information
        request = compat_urllib_request.Request(url, None, headers)
        try:
            webpage = self._download_webpage(request, video_id)
        except ExtractorError as ee:
            if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
                errmsg = ee.cause.read()
                if b'Because of its privacy settings, this video cannot be played here' in errmsg:
                    raise ExtractorError(
                        'Cannot download embed-only video without embedding '
                        'URL. Please call youtube-dl with the URL of the page '
                        'that embeds this video.',
                        expected=True)
            raise

        # Now we begin extracting as much information as we can from what we
        # retrieved. First we extract the information common to all extractors,
        # and later we extract those that are Vimeo specific.
        self.report_extraction(video_id)

        # Extract the config JSON
        try:
            try:
                config_url = self._html_search_regex(
                    r' data-config-url="(.+?)"', webpage, 'config URL')
                config_json = self._download_webpage(config_url, video_id)
                config = json.loads(config_json)
            except RegexNotFoundError:
                # For pro videos or player.vimeo.com urls
                # we try to find out which variable the config dict is assigned to
                m_variable_name = re.search(r'(\w)\.video\.id', webpage)
                if m_variable_name is not None:
                    config_re = r'%s=({[^}].+?});' % re.escape(m_variable_name.group(1))
                else:
                    config_re = [r' = {config:({.+?}),assets:', r'(?:[abc])=({.+?});']
                config = self._search_regex(config_re, webpage, 'info section',
                                            flags=re.DOTALL)
                config = json.loads(config)
        except Exception as e:
            if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
                raise ExtractorError('The author has restricted the access to this video, try with the "--referer" option')

            if re.search(r'<form[^>]+?id="pw_form"', webpage) is not None:
                if data and '_video_password_verified' in data:
                    # The password was already submitted once, so asking again means it failed
                    raise ExtractorError('video password verification failed!')
                self._verify_video_password(url, video_id, webpage)
                # Retry once; the smuggled flag prevents an infinite loop
                return self._real_extract(
                    smuggle_url(url, {'_video_password_verified': 'verified'}))
            else:
                raise ExtractorError('Unable to extract info section',
                                     cause=e)
        else:
            if config.get('view') == 4:
                config = self._verify_player_video_password(url, video_id)

        # Extract title
        video_title = config["video"]["title"]

        # Extract uploader and uploader_id
        video_uploader = config["video"]["owner"]["name"]
        video_uploader_id = config["video"]["owner"]["url"].split('/')[-1] if config["video"]["owner"]["url"] else None

        # Extract video thumbnail
        video_thumbnail = config["video"].get("thumbnail")
        if video_thumbnail is None:
            video_thumbs = config["video"].get("thumbs")
            if video_thumbs and isinstance(video_thumbs, dict):
                _, video_thumbnail = sorted((int(width if width.isdigit() else 0), t_url) for (width, t_url) in video_thumbs.items())[-1]

        # Extract video description
        video_description = self._html_search_regex(
            r'(?s)<div\s+class="[^"]*description[^"]*"[^>]*>(.*?)</div>',
            webpage, 'description', default=None)
        if not video_description:
            video_description = self._html_search_meta(
                'description', webpage, default=None)
        if not video_description and mobj.group('pro'):
            orig_webpage = self._download_webpage(
                orig_url, video_id,
                note='Downloading webpage for description',
                fatal=False)
            if orig_webpage:
                video_description = self._html_search_meta(
                    'description', orig_webpage, default=None)
        if not video_description and not mobj.group('player'):
            self._downloader.report_warning('Cannot find video description')

        # Extract video duration
        video_duration = int_or_none(config["video"].get("duration"))

        # Extract upload date
        video_upload_date = None
        mobj = re.search(r'<meta itemprop="dateCreated" content="(\d{4})-(\d{2})-(\d{2})T', webpage)
        if mobj is not None:
            video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)

        try:
            view_count = int(self._search_regex(r'UserPlays:(\d+)', webpage, 'view count'))
            like_count = int(self._search_regex(r'UserLikes:(\d+)', webpage, 'like count'))
            comment_count = int(self._search_regex(r'UserComments:(\d+)', webpage, 'comment count'))
        except RegexNotFoundError:
            # This info is only available in vimeo.com/{id} urls
            view_count = None
            like_count = None
            comment_count = None

        # Vimeo specific: extract request signature and timestamp
        sig = config['request']['signature']
        timestamp = config['request']['timestamp']

        # Vimeo specific: extract video codec and quality information
        # First consider quality, then codecs, then take everything
        codecs = [('vp6', 'flv'), ('vp8', 'flv'), ('h264', 'mp4')]
        files = {'hd': [], 'sd': [], 'other': []}
        config_files = config["video"].get("files") or config["request"].get("files")
        for codec_name, codec_extension in codecs:
            for quality in config_files.get(codec_name, []):
                format_id = '-'.join((codec_name, quality)).lower()
                key = quality if quality in files else 'other'
                video_url = None
                if isinstance(config_files[codec_name], dict):
                    file_info = config_files[codec_name][quality]
                    video_url = file_info.get('url')
                else:
                    file_info = {}
                if video_url is None:
                    # No direct file URL given; fall back to the signed play_redirect URL
                    video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
                        % (video_id, sig, timestamp, quality, codec_name.upper())
                files[key].append({
                    'ext': codec_extension,
                    'url': video_url,
                    'format_id': format_id,
                    'width': file_info.get('width'),
                    'height': file_info.get('height'),
                })
        formats = []
        for key in ('other', 'sd', 'hd'):
            formats += files[key]
        if len(formats) == 0:
            raise ExtractorError('No known codec found')

        subtitles = {}
        text_tracks = config['request'].get('text_tracks')
        if text_tracks:
            for tt in text_tracks:
                subtitles[tt['lang']] = 'http://vimeo.com' + tt['url']

        video_subtitles = self.extract_subtitles(video_id, subtitles)
        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id, subtitles)
            return

        return {
            'id': video_id,
            'uploader': video_uploader,
            'uploader_id': video_uploader_id,
            'upload_date': video_upload_date,
            'title': video_title,
            'thumbnail': video_thumbnail,
            'description': video_description,
            'duration': video_duration,
            'formats': formats,
            'webpage_url': url,
            'view_count': view_count,
            'like_count': like_count,
            'comment_count': comment_count,
            'subtitles': video_subtitles,
        }
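

# The extractors below handle Vimeo listings (channels, users, albums, groups
# and the watch-later queue). VimeoChannelIE implements the shared pagination
# and password handling; the subclasses mostly override the URL pattern, the
# title regex and the page URL construction.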


class VimeoChannelIE(InfoExtractor):
    IE_NAME = 'vimeo:channel'
    _VALID_URL = r'https?://vimeo\.com/channels/(?P<id>[^/?#]+)/?(?:$|[?#])'
    _MORE_PAGES_INDICATOR = r'<a.+?rel="next"'
    _TITLE_RE = r'<link rel="alternate"[^>]+?title="(.*?)"'
    _TESTS = [{
        'url': 'http://vimeo.com/channels/tributes',
        'info_dict': {
            'id': 'tributes',
            'title': 'Vimeo Tributes',
        },
        'playlist_mincount': 25,
    }]

    def _page_url(self, base_url, pagenum):
        return '%s/videos/page:%d/' % (base_url, pagenum)

    def _extract_list_title(self, webpage):
        return self._html_search_regex(self._TITLE_RE, webpage, 'list title')

    def _login_list_password(self, page_url, list_id, webpage):
        login_form = self._search_regex(
            r'(?s)<form[^>]+?id="pw_form"(.*?)</form>',
            webpage, 'login form', default=None)
        if not login_form:
            return webpage

        password = self._downloader.params.get('videopassword', None)
        if password is None:
            raise ExtractorError('This album is protected by a password, use the --video-password option', expected=True)
        fields = dict(re.findall(r'''(?x)<input\s+
            type="hidden"\s+
            name="([^"]+)"\s+
            value="([^"]*)"
            ''', login_form))
        token = self._search_regex(r'xsrft: \'(.*?)\'', webpage, 'login token')
        fields['token'] = token
        fields['password'] = password
        post = compat_urllib_parse.urlencode(fields)
        password_path = self._search_regex(
            r'action="([^"]+)"', login_form, 'password URL')
        password_url = compat_urlparse.urljoin(page_url, password_path)
        password_request = compat_urllib_request.Request(password_url, post)
        password_request.add_header('Content-type', 'application/x-www-form-urlencoded')
        self._set_cookie('vimeo.com', 'xsrft', token)
        return self._download_webpage(
            password_request, list_id,
            'Verifying the password', 'Wrong password')

    def _extract_videos(self, list_id, base_url):
        video_ids = []
        for pagenum in itertools.count(1):
            page_url = self._page_url(base_url, pagenum)
            webpage = self._download_webpage(
                page_url, list_id,
                'Downloading page %s' % pagenum)
            if pagenum == 1:
                webpage = self._login_list_password(page_url, list_id, webpage)
            video_ids.extend(re.findall(r'id="clip_(\d+?)"', webpage))
            if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
                break
        entries = [self.url_result('http://vimeo.com/%s' % video_id, 'Vimeo')
                   for video_id in video_ids]
        return {'_type': 'playlist',
                'id': list_id,
                'title': self._extract_list_title(webpage),
                'entries': entries,
                }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        channel_id = mobj.group('id')
        return self._extract_videos(channel_id, 'http://vimeo.com/channels/%s' % channel_id)


class VimeoUserIE(VimeoChannelIE):
    IE_NAME = 'vimeo:user'
    _VALID_URL = r'https?://vimeo\.com/(?![0-9]+(?:$|[?#/]))(?P<name>[^/]+)(?:/videos|[#?]|$)'
    _TITLE_RE = r'<a[^>]+?class="user">([^<>]+?)</a>'
    _TESTS = [{
        'url': 'http://vimeo.com/nkistudio/videos',
        'info_dict': {
            'title': 'Nki',
            'id': 'nkistudio',
        },
        'playlist_mincount': 66,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        name = mobj.group('name')
        return self._extract_videos(name, 'http://vimeo.com/%s' % name)


class VimeoAlbumIE(VimeoChannelIE):
    IE_NAME = 'vimeo:album'
    _VALID_URL = r'https?://vimeo\.com/album/(?P<id>\d+)'
    _TITLE_RE = r'<header id="page_header">\n\s*<h1>(.*?)</h1>'
    _TESTS = [{
        'url': 'http://vimeo.com/album/2632481',
        'info_dict': {
            'id': '2632481',
            'title': 'Staff Favorites: November 2013',
        },
        'playlist_mincount': 13,
    }, {
        'note': 'Password-protected album',
        'url': 'https://vimeo.com/album/3253534',
        'info_dict': {
            'title': 'test',
            'id': '3253534',
        },
        'playlist_count': 1,
        'params': {
            'videopassword': 'youtube-dl',
        }
    }]

    def _page_url(self, base_url, pagenum):
        return '%s/page:%d/' % (base_url, pagenum)

    def _real_extract(self, url):
        album_id = self._match_id(url)
        return self._extract_videos(album_id, 'http://vimeo.com/album/%s' % album_id)


class VimeoGroupsIE(VimeoAlbumIE):
    IE_NAME = 'vimeo:group'
    _VALID_URL = r'(?:https?://)?vimeo\.com/groups/(?P<name>[^/]+)'
    _TESTS = [{
        'url': 'http://vimeo.com/groups/rolexawards',
        'info_dict': {
            'id': 'rolexawards',
            'title': 'Rolex Awards for Enterprise',
        },
        'playlist_mincount': 73,
    }]

    def _extract_list_title(self, webpage):
        return self._og_search_title(webpage)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        name = mobj.group('name')
        return self._extract_videos(name, 'http://vimeo.com/groups/%s' % name)


class VimeoReviewIE(InfoExtractor):
    IE_NAME = 'vimeo:review'
    IE_DESC = 'Review pages on vimeo'
    _VALID_URL = r'https?://vimeo\.com/[^/]+/review/(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'https://vimeo.com/user21297594/review/75524534/3c257a1b5d',
        'md5': 'c507a72f780cacc12b2248bb4006d253',
        'info_dict': {
            'id': '75524534',
            'ext': 'mp4',
            'title': "DICK HARDWICK 'Comedian'",
            'uploader': 'Richard Hardwick',
        }
    }, {
        'note': 'video player needs Referer',
        'url': 'http://vimeo.com/user22258446/review/91613211/13f927e053',
        'md5': '6295fdab8f4bf6a002d058b2c6dce276',
        'info_dict': {
            'id': '91613211',
            'ext': 'mp4',
            'title': 're:(?i)^Death by dogma versus assembling agile . Sander Hoogendoorn',
            'uploader': 'DevWeek Events',
            'duration': 2773,
            'thumbnail': 're:^https?://.*\.jpg$',
        }
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        player_url = 'https://player.vimeo.com/player/' + video_id
        return self.url_result(player_url, 'Vimeo', video_id)


class VimeoWatchLaterIE(VimeoBaseInfoExtractor, VimeoChannelIE):
    IE_NAME = 'vimeo:watchlater'
    IE_DESC = 'Vimeo watch later list, "vimeowatchlater" keyword (requires authentication)'
    _VALID_URL = r'https?://vimeo\.com/home/watchlater|:vimeowatchlater'
    _LOGIN_REQUIRED = True
    _TITLE_RE = r'href="/home/watchlater".*?>(.*?)<'
    _TESTS = [{
        'url': 'http://vimeo.com/home/watchlater',
        'only_matching': True,
    }]

    def _real_initialize(self):
        self._login()

    def _page_url(self, base_url, pagenum):
        url = '%s/page:%d/' % (base_url, pagenum)
        request = compat_urllib_request.Request(url)
        # Set the header to get a partial html page with the ids,
        # the normal page doesn't contain them.
        request.add_header('X-Requested-With', 'XMLHttpRequest')
        return request

    def _real_extract(self, url):
        return self._extract_videos('watchlater', 'https://vimeo.com/home/watchlater')


class VimeoLikesIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?vimeo\.com/user(?P<id>[0-9]+)/likes/?(?:$|[?#]|sort:)'
    IE_NAME = 'vimeo:likes'
    IE_DESC = 'Vimeo user likes'
    _TEST = {
        'url': 'https://vimeo.com/user755559/likes/',
        'playlist_mincount': 293,
        "info_dict": {
            'id': 'user755559_likes',
            "description": "See all the videos urza likes",
            "title": 'Videos urza likes',
        },
    }

    def _real_extract(self, url):
        user_id = self._match_id(url)
        webpage = self._download_webpage(url, user_id)
        page_count = self._int(
            self._search_regex(
                r'''(?x)<li><a\s+href="[^"]+"\s+data-page="([0-9]+)">
                    .*?</a></li>\s*<li\s+class="pagination_next">
                ''', webpage, 'page count'),
            'page count', fatal=True)
        PAGE_SIZE = 12
        title = self._html_search_regex(
            r'(?s)<h1>(.+?)</h1>', webpage, 'title', fatal=False)
        description = self._html_search_meta('description', webpage)

        def _get_page(idx):
            page_url = '%s//vimeo.com/user%s/likes/page:%d/sort:date' % (
                self.http_scheme(), user_id, idx + 1)
            webpage = self._download_webpage(
                page_url, user_id,
                note='Downloading page %d/%d' % (idx + 1, page_count))
            video_list = self._search_regex(
                r'(?s)<ol class="js-browse_list[^"]+"[^>]*>(.*?)</ol>',
                webpage, 'video content')
            paths = re.findall(
                r'<li[^>]*>\s*<a\s+href="([^"]+)"', video_list)
            for path in paths:
                yield {
                    '_type': 'url',
                    'url': compat_urlparse.urljoin(page_url, path),
                }

        pl = InAdvancePagedList(_get_page, page_count, PAGE_SIZE)

        return {
            '_type': 'playlist',
            'id': 'user%s_likes' % user_id,
            'title': title,
            'description': description,
            'entries': pl,
        }
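

# Illustrative usage sketch (not part of this module): these extractors are
# normally driven through the YoutubeDL frontend rather than instantiated
# directly, e.g.
#
#     import youtube_dl
#     ydl = youtube_dl.YoutubeDL({'quiet': True})
#     info = ydl.extract_info('http://vimeo.com/56015672', download=False)
#     print(info['title'])
#
# Password-protected videos and albums additionally need the 'videopassword'
# option (--video-password on the command line).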