20 changed files with 464 additions and 117 deletions
Split View
Diff Options
-
1test/test_all_urls.py
-
2test/test_playlists.py
-
20test/test_youtube_lists.py
-
61youtube_dl/YoutubeDL.py
-
1youtube_dl/__init__.py
-
4youtube_dl/extractor/__init__.py
-
53youtube_dl/extractor/bandcamp.py
-
18youtube_dl/extractor/brightcove.py
-
53youtube_dl/extractor/clipfish.py
-
7youtube_dl/extractor/collegehumor.py
-
10youtube_dl/extractor/common.py
-
2youtube_dl/extractor/howcast.py
-
2youtube_dl/extractor/mixcloud.py
-
131youtube_dl/extractor/niconico.py
-
1youtube_dl/extractor/streamcloud.py
-
91youtube_dl/extractor/viki.py
-
86youtube_dl/extractor/youtube.py
-
8youtube_dl/update.py
-
28youtube_dl/utils.py
-
2youtube_dl/version.py
@ -0,0 +1,53 @@ |
|||
import re |
|||
import time |
|||
import xml.etree.ElementTree |
|||
|
|||
from .common import InfoExtractor |
|||
|
|||
|
|||
class ClipfishIE(InfoExtractor):
    """Extractor for videos hosted on clipfish.de.

    Fetches the site's XML video-info endpoint and reads title, file URL,
    thumbnail and duration from it.
    """
    IE_NAME = u'clipfish'

    _VALID_URL = r'^https?://(?:www\.)?clipfish\.de/.*?/video/(?P<id>[0-9]+)/'
    _TEST = {
        u'url': u'http://www.clipfish.de/special/supertalent/video/4028320/supertalent-2013-ivana-opacak-singt-nobodys-perfect/',
        u'file': u'4028320.f4v',
        u'md5': u'5e38bda8c329fbfb42be0386a3f5a382',
        u'info_dict': {
            u'title': u'Supertalent 2013: Ivana Opacak singt Nobody\'s Perfect',
            u'duration': 399,
        }
    }

    def _real_extract(self, url):
        """Download the XML info document for *url* and build the info dict."""
        video_id = re.match(self._VALID_URL, url).group('id')

        # The endpoint wants a cache-busting timestamp query parameter.
        info_url = ('http://www.clipfish.de/devxml/videoinfo/%s?ts=%d' %
                    (video_id, int(time.time())))
        info_xml = self._download_webpage(
            info_url, video_id, note=u'Downloading info page')
        doc = xml.etree.ElementTree.fromstring(info_xml)

        # Duration is formatted H:MM:SS:ms; milliseconds are discarded.
        duration = None
        duration_match = re.match(
            r'^(?P<hours>[0-9]+):(?P<minutes>[0-9]{2}):(?P<seconds>[0-9]{2}):(?P<ms>[0-9]*)$',
            doc.find('duration').text)
        if duration_match:
            duration = (
                int(duration_match.group('hours')) * 3600
                + int(duration_match.group('minutes')) * 60
                + int(duration_match.group('seconds'))
            )

        return {
            'id': video_id,
            'title': doc.find('title').text,
            'url': doc.find('filename').text,
            'thumbnail': doc.find('imageurl').text,
            'duration': duration,
        }
@ -0,0 +1,131 @@ |
|||
# encoding: utf-8 |
|||
|
|||
import re |
|||
import socket |
|||
import xml.etree.ElementTree |
|||
|
|||
from .common import InfoExtractor |
|||
from ..utils import ( |
|||
compat_http_client, |
|||
compat_urllib_error, |
|||
compat_urllib_parse, |
|||
compat_urllib_request, |
|||
compat_urlparse, |
|||
compat_str, |
|||
|
|||
ExtractorError, |
|||
unified_strdate, |
|||
) |
|||
|
|||
|
|||
class NiconicoIE(InfoExtractor):
    """Information extractor for nicovideo.jp (ニコニコ動画).

    The site requires an account for video access, so ``_LOGIN_REQUIRED``
    is set and login happens in ``_real_initialize`` before extraction.
    """
    IE_NAME = u'niconico'
    # Human-readable site name (non-ASCII, hence the unicode literal).
    IE_DESC = u'ニコニコ動画'

    _TEST = {
        u'url': u'http://www.nicovideo.jp/watch/sm22312215',
        u'file': u'sm22312215.mp4',
        u'md5': u'd1a75c0823e2f629128c43e1212760f9',
        u'info_dict': {
            u'title': u'Big Buck Bunny',
            u'uploader': u'takuya0301',
            u'uploader_id': u'2698420',
            u'upload_date': u'20131123',
            u'description': u'(c) copyright 2008, Blender Foundation / www.bigbuckbunny.org',
        },
        u'params': {
            u'username': u'ydl.niconico@gmail.com',
            u'password': u'youtube-dl',
        },
    }

    # Video IDs look like "sm22312215": two lowercase letters plus digits.
    _VALID_URL = r'^https?://(?:www\.|secure\.)?nicovideo\.jp/watch/([a-z][a-z][0-9]+)(?:.*)$'
    _NETRC_MACHINE = 'niconico'
    # If True it will raise an error if no login info is provided
    _LOGIN_REQUIRED = True

    def _real_initialize(self):
        # Runs once before any extraction; niconico refuses anonymous access.
        self._login()

    def _login(self):
        """Log in to niconico.

        Returns True on success, False when no credentials were given or the
        site rejected them. Raises ExtractorError when credentials are
        missing and ``_LOGIN_REQUIRED`` is set.
        """
        (username, password) = self._get_login_info()
        # No authentication to be performed
        if username is None:
            if self._LOGIN_REQUIRED:
                raise ExtractorError(u'No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            return False

        # Log in
        login_form_strs = {
            u'mail': username,
            u'password': password,
        }
        # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
        # chokes on unicode
        login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k,v in login_form_strs.items())
        login_data = compat_urllib_parse.urlencode(login_form).encode('utf-8')
        request = compat_urllib_request.Request(
            u'https://secure.nicovideo.jp/secure/login', login_data)
        login_results = self._download_webpage(
            request, u'', note=u'Logging in', errnote=u'Unable to log in')
        # The login endpoint returns HTTP 200 even on failure; detect the
        # failure by scanning the page for the error heading.
        if re.search(r'(?i)<h1 class="mb8p4">Log in error</h1>', login_results) is not None:
            self._downloader.report_warning(u'unable to log in: bad username or password')
            return False
        return True

    def _real_extract(self, url):
        """Extract video metadata and the real media URL for a watch page."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group(1)

        # Get video webpage. We are not actually interested in it, but need
        # the cookies in order to be able to download the info webpage
        self._download_webpage('http://www.nicovideo.jp/watch/' + video_id, video_id)

        video_info_webpage = self._download_webpage(
            'http://ext.nicovideo.jp/api/getthumbinfo/' + video_id, video_id,
            note=u'Downloading video info page')

        # Get flv info
        flv_info_webpage = self._download_webpage(
            u'http://flapi.nicovideo.jp/api/getflv?v=' + video_id,
            video_id, u'Downloading flv info')
        # The getflv response is a urlencoded query string; 'url' holds the
        # direct media URL.
        video_real_url = compat_urlparse.parse_qs(flv_info_webpage)['url'][0]

        # Start extracting information
        video_info = xml.etree.ElementTree.fromstring(video_info_webpage)
        video_title = video_info.find('.//title').text
        video_extension = video_info.find('.//movie_type').text
        video_format = video_extension.upper()
        video_thumbnail = video_info.find('.//thumbnail_url').text
        video_description = video_info.find('.//description').text
        video_uploader_id = video_info.find('.//user_id').text
        # first_retrieve carries a timezone offset after '+'; strip it before
        # parsing the date.
        video_upload_date = unified_strdate(video_info.find('.//first_retrieve').text.split('+')[0])
        video_view_count = video_info.find('.//view_counter').text
        video_webpage_url = video_info.find('.//watch_url').text

        # uploader
        # Fall back to the numeric uploader id if the nickname lookup fails.
        video_uploader = video_uploader_id
        url = 'http://seiga.nicovideo.jp/api/user/info?id=' + video_uploader_id
        try:
            user_info_webpage = self._download_webpage(
                url, video_id, note=u'Downloading user information')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            # Best-effort: keep the id as uploader name on network failure.
            self._downloader.report_warning(u'Unable to download user info webpage: %s' % compat_str(err))
        else:
            user_info = xml.etree.ElementTree.fromstring(user_info_webpage)
            video_uploader = user_info.find('.//nickname').text

        return {
            'id': video_id,
            'url': video_real_url,
            'title': video_title,
            'ext': video_extension,
            'format': video_format,
            'thumbnail': video_thumbnail,
            'description': video_description,
            'uploader': video_uploader,
            'upload_date': video_upload_date,
            'uploader_id': video_uploader_id,
            'view_count': video_view_count,
            'webpage_url': video_webpage_url,
        }
@ -0,0 +1,91 @@ |
|||
import re |
|||
|
|||
from ..utils import ( |
|||
unified_strdate, |
|||
) |
|||
from .subtitles import SubtitlesInfoExtractor |
|||
|
|||
|
|||
class VikiIE(SubtitlesInfoExtractor):
    """Extractor for viki.com videos, including subtitles and age rating."""
    IE_NAME = u'viki'

    _VALID_URL = r'^https?://(?:www\.)?viki\.com/videos/(?P<id>[0-9]+v)'
    _TEST = {
        u'url': u'http://www.viki.com/videos/1023585v-heirs-episode-14',
        u'file': u'1023585v.mp4',
        u'md5': u'a21454021c2646f5433514177e2caa5f',
        u'info_dict': {
            u'title': u'Heirs Episode 14',
            u'uploader': u'SBS',
            u'description': u'md5:c4b17b9626dd4b143dcc4d855ba3474e',
            u'upload_date': u'20131121',
            u'age_limit': 13,
        }
    }

    def _real_extract(self, url):
        """Extract metadata, video URL and subtitles for a viki.com page."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group(1)

        webpage = self._download_webpage(url, video_id)
        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage)
        thumbnail = self._og_search_thumbnail(webpage)

        uploader = self._html_search_regex(
            r'<strong>Broadcast Network: </strong>\s*([^<]*)<', webpage,
            u'uploader')
        if uploader is not None:
            uploader = uploader.strip()

        # Map the site's rating label to an age limit in years; unknown
        # labels (including the '' default) yield age_limit=None.
        rating_str = self._html_search_regex(
            r'<strong>Rating: </strong>\s*([^<]*)<', webpage,
            u'rating information', default='').strip()
        RATINGS = {
            'G': 0,
            'PG': 10,
            'PG-13': 13,
            'R': 16,
            'NC': 18,
        }
        age_limit = RATINGS.get(rating_str)

        # The player fragment contains the <source> element with the media
        # URL plus the JSON-ish metadata and <track> subtitle elements.
        info_url = 'http://www.viki.com/player5_fragment/%s?action=show&controller=videos' % video_id
        info_webpage = self._download_webpage(info_url, video_id)
        video_url = self._html_search_regex(
            r'<source[^>]+src="([^"]+)"', info_webpage, u'video URL')

        upload_date_str = self._html_search_regex(
            r'"created_at":"([^"]+)"', info_webpage, u'upload date')
        upload_date = (
            unified_strdate(upload_date_str)
            if upload_date_str is not None
            else None
        )

        # subtitles
        video_subtitles = self.extract_subtitles(video_id, info_webpage)
        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id, info_webpage)
            return

        return {
            'id': video_id,
            'title': title,
            'url': video_url,
            'description': description,
            'thumbnail': thumbnail,
            'age_limit': age_limit,
            'uploader': uploader,
            'subtitles': video_subtitles,
            'upload_date': upload_date,
        }

    def _get_available_subtitles(self, video_id, info_webpage):
        """Return a dict mapping subtitle language codes to their VTT URLs.

        Parameters:
            video_id: unused; kept for the SubtitlesInfoExtractor interface.
            info_webpage: player fragment HTML containing <track> elements.
        """
        res = {}
        # Bug fix: re.findall was called with only the pattern and no
        # string to search, which raised TypeError on every subtitle
        # extraction; pass info_webpage as the target string.
        for sturl in re.findall(r'<track src="([^"]+)"/>', info_webpage):
            m = re.search(r'/(?P<lang>[a-z]+)\.vtt', sturl)
            if not m:
                continue
            res[m.group('lang')] = sturl
        return res
@ -1,2 +1,2 @@ |
|||
|
|||
__version__ = '2013.11.22' |
|||
__version__ = '2013.11.24.1' |
Write
Preview
Loading…
Cancel
Save