Browse Source
[revision3] Add new extractor (closes #6388)
[revision3] Add new extractor (closes #6388)
- revision3.com - testtube.com - animalist.commaster
2 changed files with 104 additions and 0 deletions
Unified View
Diff Options
@ -0,0 +1,103 @@ |
|||||
|
# coding: utf-8 |
||||
|
from __future__ import unicode_literals |
||||
|
|
||||
|
import re |
||||
|
|
||||
|
from .common import InfoExtractor |
||||
|
from ..compat import compat_str |
||||
|
from ..utils import ( |
||||
|
int_or_none, |
||||
|
parse_iso8601, |
||||
|
unescapeHTML, |
||||
|
) |
||||
|
|
||||
|
|
||||
|
class Revision3IE(InfoExtractor):
    """Extractor for revision3.com and its sister domains (testtube.com, animalist.com).

    The apiProxy/ddn endpoint returns either:
    * an episode page  -> extract a single video with all available formats, or
    * a show page      -> build a playlist of url_result entries, one per episode,
                          paginating until every episode reported by meta.totalEpisodes
                          has been collected.
    """
    _VALID_URL = r'https?://(?:www\.)?(?P<domain>(?:revision3|testtube|animalist)\.com)/(?P<id>[^/]+(?:/[^/?#]+)?)'
    _TESTS = [{
        'url': 'http://www.revision3.com/technobuffalo/5-google-predictions-for-2016',
        'md5': 'd94a72d85d0a829766de4deb8daaf7df',
        'info_dict': {
            'id': '73034',
            'ext': 'webm',
            'title': '5 Google Predictions for 2016',
            'description': 'Google had a great 2015, but it\'s already time to look ahead. Here are our five predictions for 2016.',
            'upload_date': '20151228',
            'timestamp': 1451325600,
            'duration': 187,
        }
    }, {
        'url': 'http://testtube.com/brainstuff',
        'info_dict': {
            'id': '251',
            'title': 'BrainStuff',
            'description': 'Whether the topic is popcorn or particle physics, you can count on the HowStuffWorks team to explore-and explain-the everyday science in the world around us on BrainStuff.',
        },
        'playlist_mincount': 93,
    }]
    # %s slots: (domain, display_id[/page_num], domain)
    _PAGE_DATA_TEMPLATE = 'http://www.%s/apiProxy/ddn/%s?domain=%s'
    _API_KEY = 'ba9c741bce1b9d8e3defcc22193f3651b8867e62'

    def _real_extract(self, url):
        domain, display_id = re.match(self._VALID_URL, url).groups()
        page_info = self._download_json(
            self._PAGE_DATA_TEMPLATE % (domain, display_id, domain), display_id)

        if page_info['data']['type'] == 'episode':
            episode_data = page_info['data']
            video_id = compat_str(episode_data['video']['data']['id'])
            video_data = self._download_json(
                'http://revision3.com/api/getPlaylist.json?api_key=%s&codecs=h264,vp8,theora&video_id=%s' % (self._API_KEY, video_id),
                video_id)['items'][0]

            formats = []
            for media_type, media in video_data['media'].items():
                for quality_id, quality in media.items():
                    if quality_id == 'hls':
                        formats.extend(self._extract_m3u8_formats(
                            quality['url'], video_id, 'mp4',
                            'm3u8_native', m3u8_id='hls', fatal=False))
                    else:
                        formats.append({
                            'url': quality['url'],
                            'format_id': '%s-%s' % (media_type, quality_id),
                            'tbr': int_or_none(quality.get('bitrate')),
                        })
            self._sort_formats(formats)

            thumbnails = [{
                'url': image_url,
                'id': image_id,
            } for image_id, image_url in video_data.get('images', {}).items()]

            return {
                'id': video_id,
                'title': unescapeHTML(video_data['title']),
                'description': unescapeHTML(video_data.get('summary')),
                # publishTime separates date and time with a space instead of 'T',
                # hence the explicit delimiter argument.
                'timestamp': parse_iso8601(episode_data.get('publishTime'), ' '),
                # NOTE(review): 'author' is not a standard info_dict field
                # ('uploader' is) — confirm whether this should be renamed.
                'author': episode_data.get('author'),
                'duration': int_or_none(video_data.get('duration')),
                'thumbnails': thumbnails,
                'formats': formats,
            }
        else:
            show_data = page_info['show']['data']
            episodes_data = page_info['episodes']['data']
            num_episodes = page_info['meta']['totalEpisodes']
            processed_episodes = 0
            entries = []
            page_num = 1
            while True:
                entries.extend([self.url_result(
                    url + '/%s' % episode['slug']) for episode in episodes_data])
                processed_episodes += len(episodes_data)
                # Use >= (not ==) so a mismatch with meta.totalEpisodes cannot
                # overshoot the target and loop forever; an empty page likewise
                # means there is nothing more to fetch.
                if processed_episodes >= num_episodes or not episodes_data:
                    break
                page_num += 1
                episodes_data = self._download_json(self._PAGE_DATA_TEMPLATE % (
                    domain, display_id + '/' + compat_str(page_num), domain),
                    display_id)['episodes']['data']

            return self.playlist_result(
                entries, compat_str(show_data['id']),
                show_data.get('name'), show_data.get('summary'))
Write
Preview
Loading…
Cancel
Save