Compare commits

...

10 Commits

Author SHA1 Message Date
dirkf
956b8c5855 [YouTube] Bug-fix for c1f5c3274a 2025-11-26 03:02:36 +00:00
dirkf
d5f561166b [core] Re-work format_note display in format list with abbreviated codec name 2025-11-26 03:02:36 +00:00
dirkf
d0283f5385 [YouTube] Revert forcing player JS by default
* still leaving the parameters in place

thx bashonly for confirming this suggestion
2025-11-21 01:52:11 +00:00
dirkf
6315f4b1df [utils] Support additional codecs and dynamic_range 2025-11-21 01:52:11 +00:00
dirkf
aeb1254fcf [YouTube] Fix playlist thumbnail extraction
Thx seproDev, yt-dlp/yt-dlp#11615
2025-11-21 01:52:11 +00:00
dirkf
25890f2ad1 [YouTube] Improve detection of geo-restriction
Thx yt-dlp
2025-11-21 01:52:11 +00:00
dirkf
d65882a022 [YouTube] Improve mark_watched()
Thx: Brett824, yt-dlp/yt-dlp#4146
2025-11-21 01:52:11 +00:00
dirkf
39378f7b5c [YouTube] Fix incorrect chapter extraction
* align `_get_text()` with yt-dlp (thx, passim) at last
2025-11-21 01:52:11 +00:00
dirkf
6f5d4c3289 [YouTube] Improve targeting of pre-roll wait
Experimental for now.
Thx: yt-dlp/yt-dlp#14646
2025-11-21 01:52:11 +00:00
dirkf
5d445f8c5f [YouTube] Re-work client selection
* use `android_sdkless` by default
* use `web_safari` (HLS only) if logged in
* skip any non-HLS format with n-challenge
2025-11-21 01:52:11 +00:00
5 changed files with 282 additions and 167 deletions

View File

@@ -902,6 +902,30 @@ class TestUtil(unittest.TestCase):
'vcodec': 'av01.0.05M.08',
'acodec': 'none',
})
self.assertEqual(parse_codecs('vp9.2'), {
'vcodec': 'vp9.2',
'acodec': 'none',
'dynamic_range': 'HDR10',
})
self.assertEqual(parse_codecs('vp09.02.50.10.01.09.18.09.00'), {
'vcodec': 'vp09.02.50.10.01.09.18.09.00',
'acodec': 'none',
'dynamic_range': 'HDR10',
})
self.assertEqual(parse_codecs('av01.0.12M.10.0.110.09.16.09.0'), {
'vcodec': 'av01.0.12M.10.0.110.09.16.09.0',
'acodec': 'none',
'dynamic_range': 'HDR10',
})
self.assertEqual(parse_codecs('dvhe'), {
'vcodec': 'dvhe',
'acodec': 'none',
'dynamic_range': 'DV',
})
self.assertEqual(parse_codecs('fLaC'), {
'vcodec': 'none',
'acodec': 'flac',
})
self.assertEqual(parse_codecs('theora, vorbis'), {
'vcodec': 'theora',
'acodec': 'vorbis',

View File

@@ -2404,60 +2404,52 @@ class YoutubeDL(object):
return res
def _format_note(self, fdict):
res = ''
if fdict.get('ext') in ['f4f', 'f4m']:
res += '(unsupported) '
if fdict.get('language'):
if res:
res += ' '
res += '[%s] ' % fdict['language']
if fdict.get('format_note') is not None:
res += fdict['format_note'] + ' '
if fdict.get('tbr') is not None:
res += '%4dk ' % fdict['tbr']
def simplified_codec(f, field):
assert field in ('acodec', 'vcodec')
codec = f.get(field)
return (
'unknown' if not codec
else '.'.join(codec.split('.')[:4]) if codec != 'none'
else 'images' if field == 'vcodec' and f.get('acodec') == 'none'
else None if field == 'acodec' and f.get('vcodec') == 'none'
else 'audio only' if field == 'vcodec'
else 'video only')
res = join_nonempty(
fdict.get('ext') in ('f4f', 'f4m') and '(unsupported)',
fdict.get('language') and ('[%s]' % (fdict['language'],)),
fdict.get('format_note') is not None and fdict['format_note'],
fdict.get('tbr') is not None and ('%4dk' % fdict['tbr']),
delim=' ')
res = [res] if res else []
if fdict.get('container') is not None:
if res:
res += ', '
res += '%s container' % fdict['container']
if (fdict.get('vcodec') is not None
and fdict.get('vcodec') != 'none'):
if res:
res += ', '
res += fdict['vcodec']
if fdict.get('vbr') is not None:
res += '@'
res.append('%s container' % (fdict['container'],))
if fdict.get('vcodec') not in (None, 'none'):
codec = simplified_codec(fdict, 'vcodec')
if codec and fdict.get('vbr') is not None:
codec += '@'
elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
res += 'video@'
if fdict.get('vbr') is not None:
res += '%4dk' % fdict['vbr']
codec = 'video@'
else:
codec = None
codec = join_nonempty(codec, fdict.get('vbr') is not None and ('%4dk' % fdict['vbr']))
if codec:
res.append(codec)
if fdict.get('fps') is not None:
if res:
res += ', '
res += '%sfps' % fdict['fps']
if fdict.get('acodec') is not None:
if res:
res += ', '
if fdict['acodec'] == 'none':
res += 'video only'
else:
res += '%-5s' % fdict['acodec']
elif fdict.get('abr') is not None:
if res:
res += ', '
res += 'audio'
if fdict.get('abr') is not None:
res += '@%3dk' % fdict['abr']
if fdict.get('asr') is not None:
res += ' (%5dHz)' % fdict['asr']
res.append('%sfps' % (fdict['fps'],))
codec = (
simplified_codec(fdict, 'acodec') if fdict.get('acodec') is not None
else 'audio' if fdict.get('abr') is not None else None)
if codec:
res.append(join_nonempty(
'%-4s' % (codec + (('@%3dk' % fdict['abr']) if fdict.get('abr') else ''),),
fdict.get('asr') and '(%5dHz)' % fdict['asr'], delim=' '))
if fdict.get('filesize') is not None:
if res:
res += ', '
res += format_bytes(fdict['filesize'])
res.append(format_bytes(fdict['filesize']))
elif fdict.get('filesize_approx') is not None:
if res:
res += ', '
res += '~' + format_bytes(fdict['filesize_approx'])
return res
res.append('~' + format_bytes(fdict['filesize_approx']))
return ', '.join(res)
def list_formats(self, info_dict):
formats = info_dict.get('formats', [info_dict])

View File

@@ -483,6 +483,12 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
('responseContext', 'visitorData')),
T(compat_str)))
# @functools.cached_property
def is_authenticated(self, _cache={}):
if self not in _cache:
_cache[self] = bool(self._generate_sapisidhash_header())
return _cache[self]
def _extract_ytcfg(self, video_id, webpage):
ytcfg = self._search_json(
r'ytcfg\.set\s*\(', webpage, 'ytcfg', video_id,
@@ -527,6 +533,27 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
'uploader': uploader,
}
@staticmethod
def _get_text(data, *path_list, **kw_max_runs):
max_runs = kw_max_runs.get('max_runs')
for path in path_list or [None]:
if path is None:
obj = [data] # shortcut
else:
obj = traverse_obj(data, tuple(variadic(path) + (all,)))
for runs in traverse_obj(
obj, ('simpleText', {'text': T(compat_str)}, all, filter),
('runs', lambda _, r: isinstance(r.get('text'), compat_str), all, filter),
(T(list), lambda _, r: isinstance(r.get('text'), compat_str)),
default=[]):
max_runs = int_or_none(max_runs, default=len(runs))
if max_runs < len(runs):
runs = runs[:max_runs]
text = ''.join(traverse_obj(runs, (Ellipsis, 'text')))
if text:
return text
@staticmethod
def _extract_thumbnails(data, *path_list, **kw_final_key):
"""
@@ -1642,10 +1669,12 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'_rtmp': {'protocol': 'rtmp'},
# av01 video only formats sometimes served with "unknown" codecs
'394': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
'395': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
'396': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
'397': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
'394': {'acodec': 'none', 'vcodec': 'av01.0.00M.08'},
'395': {'acodec': 'none', 'vcodec': 'av01.0.00M.08'},
'396': {'acodec': 'none', 'vcodec': 'av01.0.01M.08'},
'397': {'acodec': 'none', 'vcodec': 'av01.0.04M.08'},
'398': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
'399': {'acodec': 'none', 'vcodec': 'av01.0.08M.08'},
}
_PLAYER_JS_VARIANT_MAP = (
@@ -1672,16 +1701,17 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
self._player_cache = {}
def _get_player_js_version(self):
player_js_version = self.get_param('youtube_player_js_version') or '20348@0004de42'
sts_hash = self._search_regex(
('^actual$(^)?(^)?', r'^([0-9]{5,})@([0-9a-f]{8,})$'),
player_js_version, 'player_js_version', group=(1, 2), default=None)
if sts_hash:
return sts_hash
self.report_warning(
'Invalid player JS version "{0}" specified. '
'It should be "{1}" or in the format of {2}'.format(
player_js_version, 'actual', 'SignatureTimeStamp@Hash'), only_once=True)
player_js_version = self.get_param('youtube_player_js_version')
if player_js_version:
sts_hash = self._search_regex(
('^actual$(^)?(^)?', r'^([0-9]{5,})@([0-9a-f]{8,})$'),
player_js_version, 'player_js_version', group=(1, 2), default=None)
if sts_hash:
return sts_hash
self.report_warning(
'Invalid player JS version "{0}" specified. '
'It should be "{1}" or in the format of {2}'.format(
player_js_version, 'actual', 'SignatureTimeStamp@Hash'), only_once=True)
return None, None
# *ytcfgs, webpage=None
@@ -1696,18 +1726,18 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
ytcfgs = ytcfgs + ({'PLAYER_JS_URL': player_url},)
player_url = traverse_obj(
ytcfgs, (Ellipsis, 'PLAYER_JS_URL'), (Ellipsis, 'WEB_PLAYER_CONTEXT_CONFIGS', Ellipsis, 'jsUrl'),
get_all=False, expected_type=lambda u: urljoin('https://www.youtube.com', u))
get_all=False, expected_type=self._yt_urljoin)
player_id_override = self._get_player_js_version()[1]
requested_js_variant = self.get_param('youtube_player_js_variant') or 'main'
requested_js_variant = self.get_param('youtube_player_js_variant')
variant_js = next(
(v for k, v in self._PLAYER_JS_VARIANT_MAP if k == requested_js_variant),
None)
if variant_js:
player_id_override = self._get_player_js_version()[1]
player_id = player_id_override or self._extract_player_info(player_url)
original_url = player_url
player_url = '/s/player/{0}/{1}'.format(player_id, variant_js)
player_url = self._yt_urljoin(
'/s/player/{0}/{1}'.format(player_id, variant_js))
if original_url != player_url:
self.write_debug(
'Forcing "{0}" player JS variant for player {1}\n'
@@ -1721,7 +1751,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
requested_js_variant, ','.join(k for k, _ in self._PLAYER_JS_VARIANT_MAP)),
only_once=True)
return urljoin('https://www.youtube.com', player_url)
return player_url
def _download_player_url(self, video_id, fatal=False):
res = self._download_webpage(
@@ -2101,8 +2131,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
return self._cached(self._decrypt_nsig, 'nsig', n, player_url)
for fmt in formats:
parsed_fmt_url = compat_urllib_parse.urlparse(fmt['url'])
n_param = compat_parse_qs(parsed_fmt_url.query).get('n')
n_param = parse_qs(fmt['url']).get('n')
if not n_param:
continue
n_param = n_param[-1]
@@ -2151,32 +2180,35 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
return sts
def _mark_watched(self, video_id, player_response):
playback_url = url_or_none(try_get(
player_response,
lambda x: x['playbackTracking']['videostatsPlaybackUrl']['baseUrl']))
if not playback_url:
return
# cpn generation algorithm is reverse engineered from base.js.
# In fact it works even with dummy cpn.
CPN_ALPHABET = string.ascii_letters + string.digits + '-_'
cpn = ''.join(CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(16))
# more consistent results setting it to right before the end
qs = parse_qs(playback_url)
video_length = '{0}'.format(float((qs.get('len') or ['1.5'])[0]) - 1)
for is_full, key in enumerate(('videostatsPlaybackUrl', 'videostatsWatchtimeUrl')):
label = 'fully ' if is_full > 0 else ''
playback_url = update_url_query(
playback_url, {
'ver': '2',
'cpn': cpn,
'cmt': video_length,
'el': 'detailpage', # otherwise defaults to "shorts"
})
playback_url = traverse_obj(player_response, (
'playbackTracking', key, 'baseUrl', T(url_or_none)))
if not playback_url:
self.report_warning('Unable to mark {0}watched'.format(label))
continue
self._download_webpage(
playback_url, video_id, 'Marking watched',
'Unable to mark watched', fatal=False)
# more consistent results setting it to right before the end
qs = parse_qs(playback_url)
video_length = '{0}'.format(float((qs.get('len') or ['1.5'])[0]) - 1)
playback_url = update_url_query(
playback_url, {
'ver': '2',
'cpn': cpn,
'cmt': video_length,
'el': 'detailpage', # otherwise defaults to "shorts"
})
self._download_webpage(
playback_url, video_id, 'Marking {0}watched'.format(label),
'Unable to mark watched', fatal=False)
@staticmethod
def _extract_urls(webpage):
@@ -2268,6 +2300,49 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
(r'%s\s*%s' % (regex, self._YT_INITIAL_BOUNDARY_RE),
regex), webpage, name, default='{}'), video_id, fatal=False)
def _get_preroll_length(self, ad_slot_lists):
def parse_instream_ad_renderer(instream_renderer):
for skippable, path in (
('', ('skipOffsetMilliseconds', T(int))),
('non-', ('playerVars', T(compat_parse_qs),
'length_seconds', -1, T(int_or_none(invscale=1000))))):
length_ms = traverse_obj(instream_renderer, path)
if length_ms is not None:
self.write_debug('Detected a %ds %sskippable ad' % (
length_ms // 1000, skippable))
break
return length_ms
for slot_renderer in traverse_obj(ad_slot_lists, ('adSlots', Ellipsis, 'adSlotRenderer', T(dict))):
if traverse_obj(slot_renderer, ('adSlotMetadata', 'triggerEvent')) != 'SLOT_TRIGGER_EVENT_BEFORE_CONTENT':
continue
rendering_content = traverse_obj(slot_renderer, (
'fulfillmentContent', 'fulfilledLayout', 'playerBytesAdLayoutRenderer',
'renderingContent', 'instreamVideoAdRenderer', T(dict)))
length_ms = parse_instream_ad_renderer(rendering_content)
if length_ms is not None:
return length_ms
times = traverse_obj(rendering_content, ((
('playerBytesSequentialLayoutRenderer', 'sequentialLayouts'),
None), any, Ellipsis, 'playerBytesAdLayoutRenderer',
'renderingContent', 'instreamVideoAdRenderer',
T(parse_instream_ad_renderer)))
if times:
return sum(times)
return 0
def _is_premium_subscriber(self, initial_data):
if not self.is_authenticated or not initial_data:
return False
tlr = traverse_obj(
initial_data, ('topbar', 'desktopTopbarRenderer', 'logo', 'topbarLogoRenderer'))
return (
traverse_obj(tlr, ('iconImage', 'iconType')) == 'YOUTUBE_PREMIUM_LOGO'
or 'premium' in (self._get_text(tlr, 'tooltipText') or '').lower()
)
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
video_id = self._match_id(url)
@@ -2295,32 +2370,36 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
if True or not player_response:
origin = 'https://www.youtube.com'
pb_context = {'html5Preference': 'HTML5_PREF_WANTS'}
fetched_timestamp = int(time.time())
player_url = self._extract_player_url(webpage)
ytcfg = self._extract_ytcfg(video_id, webpage or '')
sts = self._extract_signature_timestamp(video_id, player_url, ytcfg)
if sts:
pb_context['signatureTimestamp'] = sts
client_names = traverse_obj(self._INNERTUBE_CLIENTS, (
T(dict.items), lambda _, k_v: not k_v[1].get('REQUIRE_PO_TOKEN'),
0))[:1]
auth = self._generate_sapisidhash_header(origin)
client_names = []
if auth or self._is_premium_subscriber(player_response):
client_names = traverse_obj(self._INNERTUBE_CLIENTS, (
T(dict_items), lambda _, k_v: k_v[0] == 'web_safari', 0))[:1]
if not client_names:
client_names = traverse_obj(self._INNERTUBE_CLIENTS, (
T(dict_items), lambda _, k_v: not (
k_v[1].get('REQUIRE_PO_TOKEN')
or (bool(k_v[1].get('WITH_COOKIES', auth)) ^ bool(auth))
), 0))[:1]
if 'web' not in client_names:
# webpage links won't download: ignore links and playability
# only live HLS webpage links will download: ignore playability
player_response = filter_dict(
player_response or {},
lambda k, _: k not in ('streamingData', 'playabilityStatus'))
if is_live and 'ios' not in client_names:
client_names.append('ios')
lambda k, _: k != 'playabilityStatus')
headers = {
'Sec-Fetch-Mode': 'navigate',
'Origin': origin,
'X-Goog-Visitor-Id': self._extract_visitor_data(ytcfg) or '',
}
auth = self._generate_sapisidhash_header(origin)
if auth is not None:
headers['Authorization'] = auth
headers['X-Origin'] = origin
@@ -2350,7 +2429,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'INNERTUBE_CONTEXT', 'client', 'clientVersion'),
'User-Agent': (
'INNERTUBE_CONTEXT', 'client', 'userAgent'),
}))
}) or {})
api_player_response = self._call_api(
'player', query, video_id, fatal=False, headers=api_headers,
@@ -2359,19 +2438,22 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'context', 'client', 'clientName')),
'API JSON', delim=' '))
hls = traverse_obj(
(player_response, api_player_response),
(Ellipsis, 'streamingData', 'hlsManifestUrl', T(url_or_none)))
# be sure to find HLS in case of is_live
hls = traverse_obj(player_response, (
'streamingData', 'hlsManifestUrl', T(url_or_none)))
fetched_timestamp = int(time.time())
if len(hls) == 2 and not hls[0] and hls[1]:
player_response['streamingData']['hlsManifestUrl'] = hls[1]
else:
video_details = merge_dicts(*traverse_obj(
(player_response, api_player_response),
(Ellipsis, 'videoDetails', T(dict))))
player_response.update(filter_dict(
api_player_response or {}, cndn=lambda k, _: k != 'captions'))
player_response['videoDetails'] = video_details
preroll_length_ms = (
self._get_preroll_length(api_player_response)
or self._get_preroll_length(player_response))
video_details = merge_dicts(*traverse_obj(
(player_response, api_player_response),
(Ellipsis, 'videoDetails', T(dict))))
player_response.update(filter_dict(
api_player_response or {}, cndn=lambda k, _: k != 'captions'))
player_response['videoDetails'] = video_details
if hls and not traverse_obj(player_response, (
'streamingData', 'hlsManifestUrl', T(url_or_none))):
player_response['streamingData']['hlsManifestUrl'] = hls
def is_agegated(playability):
# playability: dict
@@ -2438,10 +2520,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
return self.url_result(
trailer_video_id, self.ie_key(), trailer_video_id)
def get_text(x):
return ''.join(traverse_obj(
x, (('simpleText',),), ('runs', Ellipsis, 'text'),
expected_type=compat_str))
get_text = lambda x: self._get_text(x) or ''
search_meta = (
(lambda x: self._html_search_meta(x, webpage, default=None))
@@ -2529,7 +2608,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
elif fetched_timestamp is not None:
# Handle preroll waiting period
preroll_sleep = self.get_param('youtube_preroll_sleep')
preroll_sleep = int_or_none(preroll_sleep, default=6)
preroll_sleep = min(6, int_or_none(preroll_sleep, default=preroll_length_ms / 1000))
fetched_timestamp += preroll_sleep
for fmt in streaming_formats:
@@ -2575,6 +2654,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
self.write_debug(error_to_compat_str(e), only_once=True)
continue
if parse_qs(fmt_url).get('n'):
# this and (we assume) all the formats here are n-scrambled
break
language_preference = (
10 if audio_track.get('audioIsDefault')
else -10 if 'descriptive' in (traverse_obj(audio_track, ('displayName', T(lower))) or '')
@@ -2707,7 +2790,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
subreason = pemr.get('subreason')
if subreason:
subreason = clean_html(get_text(subreason))
if subreason == 'The uploader has not made this video available in your country.':
if subreason.startswith('The uploader has not made this video available in your country'):
countries = microformat.get('availableCountries')
if not countries:
regions_allowed = search_meta('regionsAllowed')
@@ -2901,24 +2984,21 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
chapters = self._extract_chapters_from_json(
initial_data, video_id, duration)
if not chapters:
for engagment_pannel in (initial_data.get('engagementPanels') or []):
contents = try_get(
engagment_pannel, lambda x: x['engagementPanelSectionListRenderer']['content']['macroMarkersListRenderer']['contents'],
list)
if not contents:
continue
def chapter_time(mmlir):
return parse_duration(
get_text(mmlir.get('timeDescription')))
def chapter_time(mmlir):
return parse_duration(
get_text(mmlir.get('timeDescription')))
for markers in traverse_obj(initial_data, (
'engagementPanels', Ellipsis, 'engagementPanelSectionListRenderer',
'content', 'macroMarkersListRenderer', 'contents', T(list))):
chapters = []
for next_num, content in enumerate(contents, start=1):
for next_num, content in enumerate(markers, start=1):
mmlir = content.get('macroMarkersListItemRenderer') or {}
start_time = chapter_time(mmlir)
end_time = (traverse_obj(
contents, (next_num, 'macroMarkersListItemRenderer', T(chapter_time)))
if next_num < len(contents) else duration)
end_time = (traverse_obj(markers, (
next_num, 'macroMarkersListItemRenderer', T(chapter_time)))
if next_num < len(markers) else duration)
if start_time is None or end_time is None:
continue
chapters.append({
@@ -3477,12 +3557,6 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
T(dict.items), lambda _, k_v: k_v[0].startswith('grid') and k_v[0].endswith('Renderer'),
1, T(dict)), get_all=False)
@staticmethod
def _get_text(r, k):
return traverse_obj(
r, (k, 'runs', 0, 'text'), (k, 'simpleText'),
expected_type=txt_or_none)
def _grid_entries(self, grid_renderer):
for item in traverse_obj(grid_renderer, ('items', Ellipsis, T(dict))):
lockup_view_model = traverse_obj(item, ('lockupViewModel', T(dict)))
@@ -3593,15 +3667,25 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
'Unsupported lockup view model content type "{0}"{1}'.format(content_type, bug_reports_message()),
only_once=True)
return
thumb_keys = ('contentImage',) + thumb_keys + ('thumbnailViewModel', 'image')
return merge_dicts(self.url_result(
url, ie=ie.ie_key(), video_id=content_id), {
'title': traverse_obj(view_model, (
'metadata', 'lockupMetadataViewModel', 'title',
'content', T(compat_str))),
'thumbnails': self._extract_thumbnails(
view_model, thumb_keys, final_key='sources'),
})
url, ie=ie.ie_key(), video_id=content_id),
traverse_obj(view_model, {
'title': ('metadata', 'lockupMetadataViewModel', 'title',
'content', T(compat_str)),
'thumbnails': T(lambda vm: self._extract_thumbnails(
vm, thumb_keys, final_key='sources')),
'duration': (
'contentImage', 'thumbnailViewModel', 'overlays',
Ellipsis, (
('thumbnailBottomOverlayViewModel', 'badges'),
('thumbnailOverlayBadgeViewModel', 'thumbnailBadges')
), Ellipsis, 'thumbnailBadgeViewModel', 'text',
T(parse_duration), any),
})
)
def _extract_shorts_lockup_view_model(self, view_model):
content_id = traverse_obj(view_model, (
@@ -3729,7 +3813,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
continuation = None
for is_renderer in traverse_obj(slr_renderer, (
'contents', Ellipsis, 'itemSectionRenderer', T(dict))):
for isr_content in traverse_obj(slr_renderer, (
for isr_content in traverse_obj(is_renderer, (
'contents', Ellipsis, T(dict))):
renderer = isr_content.get('playlistVideoListRenderer')
if renderer:

View File

@@ -421,12 +421,12 @@ def parseOpts(overrideArguments=None):
action='store', dest='youtube_player_js_variant',
help='For YouTube, the player javascript variant to use for n/sig deciphering; `actual` to follow the site; default `%default`.',
choices=('actual', 'main', 'tcc', 'tce', 'es5', 'es6', 'tv', 'tv_es6', 'phone', 'tablet'),
default='main', metavar='VARIANT')
default='actual', metavar='VARIANT')
video_format.add_option(
'--youtube-player-js-version',
action='store', dest='youtube_player_js_version',
help='For YouTube, the player javascript version to use for n/sig deciphering, specified as `signature_timestamp@hash`, or `actual` to follow the site; default `%default`',
default='20348@0004de42', metavar='STS@HASH')
default='actual', metavar='STS@HASH')
video_format.add_option(
'--merge-output-format',
action='store', dest='merge_output_format', metavar='FORMAT', default=None,

View File

@@ -4744,30 +4744,45 @@ def parse_codecs(codecs_str):
if not codecs_str:
return {}
split_codecs = list(filter(None, map(
lambda str: str.strip(), codecs_str.strip().strip(',').split(','))))
vcodec, acodec = None, None
lambda s: s.strip(), codecs_str.strip().split(','))))
vcodec, acodec, hdr = None, None, None
for full_codec in split_codecs:
codec = full_codec.split('.')[0]
if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v', 'hvc1', 'av01', 'theora'):
if not vcodec:
vcodec = full_codec
elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
codec, rest = full_codec.partition('.')[::2]
codec = codec.lower()
full_codec = '.'.join((codec, rest)) if rest else codec
codec = re.sub(r'0+(?=\d)', '', codec)
if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2',
'h263', 'h264', 'mp4v', 'hvc1', 'av1', 'theora', 'dvh1', 'dvhe'):
if vcodec:
continue
vcodec = full_codec
if codec in ('dvh1', 'dvhe'):
hdr = 'DV'
elif codec in ('av1', 'vp9'):
n, m = {
'av1': (2, '10'),
'vp9': (0, '2'),
}[codec]
if (rest.split('.', n + 1)[n:] or [''])[0].lstrip('0') == m:
hdr = 'HDR10'
elif codec in ('flac', 'mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-4',
'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
if not acodec:
acodec = full_codec
else:
write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
if not vcodec and not acodec:
if len(split_codecs) == 2:
return {
'vcodec': split_codecs[0],
'acodec': split_codecs[1],
}
else:
return {
write_string('WARNING: Unknown codec %s\n' % (full_codec,), sys.stderr)
return (
filter_dict({
'vcodec': vcodec or 'none',
'acodec': acodec or 'none',
}
return {}
'dynamic_range': hdr,
}) if vcodec or acodec
else {
'vcodec': split_codecs[0],
'acodec': split_codecs[1],
} if len(split_codecs) == 2
else {})
def urlhandle_detect_ext(url_handle):