PEP8: E225,E227

Jouke Waleson 2014-11-23 21:23:05 +01:00
parent 8bcc875676
commit 2514d2635e
19 changed files with 43 additions and 43 deletions
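
For context: pycodestyle reports E225 for missing whitespace around an operator and E227 for missing whitespace around a bitwise or shift operator. Below is a minimal sketch of the before/after pattern this commit applies; the values and the flake8 invocation are illustrative only (assuming flake8 is installed), not part of the commit.

    import re

    vidurl = 'http://example.com/path/video.mp4'  # placeholder value for illustration

    # Before: E225 ('+' lacks surrounding spaces) and E227 ('|' lacks surrounding spaces)
    baseurl = vidurl[:vidurl.rfind('/') +1]
    flags = re.MULTILINE|re.DOTALL

    # After: the whitespace fixes made throughout this commit
    baseurl = vidurl[:vidurl.rfind('/') + 1]
    flags = re.MULTILINE | re.DOTALL

    # To check only these two codes locally:
    #   flake8 --select=E225,E227 youtube_dl/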

@@ -77,7 +77,7 @@ class CinemassacreIE(InfoExtractor):
         if videolist_url:
             videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML')
             formats = []
-            baseurl = vidurl[:vidurl.rfind('/') +1]
+            baseurl = vidurl[:vidurl.rfind('/') + 1]
             for video in videolist.findall('.//video'):
                 src = video.get('src')
                 if not src:

@@ -226,10 +226,10 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
         formats = []
         for fmt in re.findall(r'\?p([0-9]{3,4})=1', webpage):
             stream_quality, stream_format = self._FORMAT_IDS[fmt]
-            video_format = fmt +'p'
+            video_format = fmt + 'p'
             streamdata_req = compat_urllib_request.Request('http://www.crunchyroll.com/xml/')
             # urlencode doesn't work!
-            streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality=' +stream_quality +'&media%5Fid=' +stream_id +'&video%5Fformat=' +stream_format
+            streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality=' + stream_quality + '&media%5Fid=' + stream_id + '&video%5Fformat=' + stream_format
             streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
             streamdata_req.add_header('Content-Length', str(len(streamdata_req.data)))
             streamdata = self._download_xml(
@@ -248,8 +248,8 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
         subtitles = {}
         sub_format = self._downloader.params.get('subtitlesformat', 'srt')
         for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
-            sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' +sub_id,\
-                video_id, note='Downloading subtitles for ' +sub_name)
+            sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id,\
+                video_id, note='Downloading subtitles for ' + sub_name)
             id = self._search_regex(r'id=\'([0-9]+)', sub_page, 'subtitle_id', fatal=False)
             iv = self._search_regex(r'<iv>([^<]+)', sub_page, 'subtitle_iv', fatal=False)
             data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False)

@@ -27,7 +27,7 @@ class DotsubIE(InfoExtractor):
         video_id = mobj.group('id')
         info_url = "https://dotsub.com/api/media/%s/metadata" % video_id
         info = self._download_json(info_url, video_id)
-        date = time.gmtime(info['dateCreated'] /1000) # The timestamp is in miliseconds
+        date = time.gmtime(info['dateCreated'] / 1000) # The timestamp is in miliseconds
         return {
             'id': video_id,

@@ -748,7 +748,7 @@ class GenericIE(InfoExtractor):
         # Look for embedded blip.tv player
         mobj = re.search(r'<meta\s[^>]*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage)
         if mobj:
-            return self.url_result('http://blip.tv/a/a-' +mobj.group(1), 'BlipTV')
+            return self.url_result('http://blip.tv/a/a-' + mobj.group(1), 'BlipTV')
         mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*(https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)[a-zA-Z0-9_]+)', webpage)
         if mobj:
             return self.url_result(mobj.group(1), 'BlipTV')

@@ -54,7 +54,7 @@ class IPrimaIE(InfoExtractor):
         player_url = (
             'http://embed.livebox.cz/iprimaplay/player-embed-v2.js?__tok%s__=%s' %
-            (floor(random() *1073741824), floor(random() *1073741824))
+            (floor(random() * 1073741824), floor(random() * 1073741824))
         )
         req = compat_urllib_request.Request(player_url)

@@ -71,4 +71,4 @@ class LifeNewsIE(InfoExtractor):
         if len(videos) == 1:
             return make_entry(video_id, videos[0])
         else:
-            return [make_entry(video_id, media, video_number +1) for video_number, media in enumerate(videos)]
+            return [make_entry(video_id, media, video_number + 1) for video_number, media in enumerate(videos)]

@@ -184,7 +184,7 @@ class SmotriIE(InfoExtractor):
         view_count = self._html_search_regex(
             'Общее количество просмотров.*?<span class="Number">(\\d+)</span>',
-            webpage, 'view count', fatal=False, flags=re.MULTILINE|re.DOTALL)
+            webpage, 'view count', fatal=False, flags=re.MULTILINE | re.DOTALL)
         return {
             'id': video_id,

@@ -69,7 +69,7 @@ class SohuIE(InfoExtractor):
             (allot, prot, clipsURL[i], su[i]))
         part_str = self._download_webpage(
             part_url, video_id,
-            note=u'Downloading part %d of %d' % (i +1, part_count))
+            note=u'Downloading part %d of %d' % (i + 1, part_count))
         part_info = part_str.split('|')
         video_url = '%s%s?key=%s' % (part_info[0], su[i], part_info[3])

@@ -80,7 +80,7 @@ class SWRMediathekIE(InfoExtractor):
         if media_type == 'Video':
             fmt.update({
-                'format_note': ['144p', '288p', '544p', '720p'][quality -1],
+                'format_note': ['144p', '288p', '544p', '720p'][quality - 1],
                 'vcodec': codec,
             })
         elif media_type == 'Audio':

@@ -118,5 +118,5 @@ class ThePlatformIE(InfoExtractor):
             'formats': formats,
             'description': info['description'],
             'thumbnail': info['defaultThumbnailUrl'],
-            'duration': info['duration'] //1000,
+            'duration': info['duration'] // 1000,
         }

@@ -37,7 +37,7 @@ class TudouIE(InfoExtractor):
     }]
     def _url_for_id(self, id, quality = None):
-        info_url = "http://v2.tudou.com/f?id=" +str(id)
+        info_url = "http://v2.tudou.com/f?id=" + str(id)
         if quality:
             info_url += '&hd' + quality
         webpage = self._download_webpage(info_url, id, "Opening the info webpage")

@@ -97,7 +97,7 @@ class XTubeUserIE(InfoExtractor):
             url, username, note='Retrieving profile page')
         video_count = int(self._search_regex(
-            r'<strong>%s\'s Videos \(([0-9]+)\)</strong>' %username, profile_page,
+            r'<strong>%s\'s Videos \(([0-9]+)\)</strong>' % username, profile_page,
             'video count'))
         PAGE_SIZE = 25

@@ -229,7 +229,7 @@ class YahooSearchIE(SearchInfoExtractor):
         for pagenum in itertools.count(0):
             result_url = 'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30)
             info = self._download_json(result_url, query,
-                note='Downloading results page ' +str(pagenum +1))
+                note='Downloading results page ' + str(pagenum + 1))
             m = info['m']
             results = info['results']