Dataset schema (one record per source file):

| column | type / range |
|---|---|
| repo_name | string, length 7–65 |
| path | string, length 5–189 |
| copies | string, 611 distinct values |
| size | string, length 4–7 |
| content | string, length 833–1.02M |
| license | string, 14 distinct values |
| hash | string, length 32 |
| line_mean | float64, 4.65–100 |
| line_max | int64, 16–1k |
| alpha_frac | float64, 0.25–0.95 |
| ratio | float64, 1.5–7.91 |
| autogenerated | bool, 1 class |
| config_or_test | bool, 2 classes |
| has_no_keywords | bool, 2 classes |
| has_few_assignments | bool, 1 class |
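A minimal sketch of how records with this schema might be filtered on the boolean flags, assuming the rows are available as a JSON-lines export (the file name `records.jsonl` is hypothetical):

```python
import json

def keep(row):
    """Keep rows whose boolean flags are all false (column names from the schema above)."""
    flags = ("autogenerated", "config_or_test", "has_no_keywords", "has_few_assignments")
    return not any(row[f] for f in flags)

# "records.jsonl" is a hypothetical export of this dataset, one JSON object per line.
with open("records.jsonl", encoding="utf-8") as fh:
    rows = [json.loads(line) for line in fh]

kept = [r for r in rows if keep(r)]
print("%d of %d rows kept" % (len(kept), len(rows)))
```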
repo_name: rbrito/pkg-youtube-dl | path: youtube_dl/extractor/cammodels.py | copies: 20 | size: 3478
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
url_or_none,
)
class CamModelsIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?cammodels\.com/cam/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://www.cammodels.com/cam/AutumnKnight/',
'only_matching': True,
'age_limit': 18
}]
def _real_extract(self, url):
user_id = self._match_id(url)
webpage = self._download_webpage(
url, user_id, headers=self.geo_verification_headers())
manifest_root = self._html_search_regex(
r'manifestUrlRoot=([^&\']+)', webpage, 'manifest', default=None)
if not manifest_root:
ERRORS = (
("I'm offline, but let's stay connected", 'This user is currently offline'),
('in a private show', 'This user is in a private show'),
('is currently performing LIVE', 'This model is currently performing live'),
)
for pattern, message in ERRORS:
if pattern in webpage:
error = message
expected = True
break
else:
error = 'Unable to find manifest URL root'
expected = False
raise ExtractorError(error, expected=expected)
manifest = self._download_json(
'%s%s.json' % (manifest_root, user_id), user_id)
formats = []
for format_id, format_dict in manifest['formats'].items():
if not isinstance(format_dict, dict):
continue
encodings = format_dict.get('encodings')
if not isinstance(encodings, list):
continue
vcodec = format_dict.get('videoCodec')
acodec = format_dict.get('audioCodec')
for media in encodings:
if not isinstance(media, dict):
continue
media_url = url_or_none(media.get('location'))
if not media_url:
continue
format_id_list = [format_id]
height = int_or_none(media.get('videoHeight'))
if height is not None:
format_id_list.append('%dp' % height)
f = {
'url': media_url,
'format_id': '-'.join(format_id_list),
'width': int_or_none(media.get('videoWidth')),
'height': height,
'vbr': int_or_none(media.get('videoKbps')),
'abr': int_or_none(media.get('audioKbps')),
'fps': int_or_none(media.get('fps')),
'vcodec': vcodec,
'acodec': acodec,
}
if 'rtmp' in format_id:
f['ext'] = 'flv'
elif 'hls' in format_id:
f.update({
'ext': 'mp4',
# hls skips fragments, preferring rtmp
'preference': -1,
})
else:
continue
formats.append(f)
self._sort_formats(formats)
return {
'id': user_id,
'title': self._live_title(user_id),
'is_live': True,
'formats': formats,
'age_limit': 18
}
license: unlicense | hash: c7e94d3cb26b9fc2aeb3978db98c891d | line_mean: 34.489796 | line_max: 92 | alpha_frac: 0.470385 | ratio: 4.402532 | autogenerated: false | config_or_test: false | has_no_keywords: false | has_few_assignments: false
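The format loop in CamModelsIE above is deliberately defensive: non-dict entries are skipped and every field goes through `url_or_none`/`int_or_none`, so a sparsely populated manifest still yields usable formats. A sketch of the JSON shape that loop expects; the key names come from the code above, while the URL and numeric values are invented for illustration:

```python
# Hypothetical manifest matching the keys read by CamModelsIE._real_extract above.
manifest = {
    "formats": {
        "mp4-hls": {                    # "hls" in the key -> ext "mp4", preference -1
            "videoCodec": "h264",
            "audioCodec": "aac",
            "encodings": [{
                "location": "https://example.invalid/live/720p.m3u8",
                "videoWidth": 1280,
                "videoHeight": 720,     # becomes the "720p" suffix of the format_id
                "videoKbps": 2500,
                "audioKbps": 128,
                "fps": 30,
            }],
        },
        "mp4-rtmp": {"encodings": []},  # an empty encodings list produces no formats
    },
}
```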
repo_name: rbrito/pkg-youtube-dl | path: youtube_dl/extractor/myspace.py | copies: 51 | size: 8412
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
parse_iso8601,
)
class MySpaceIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://
myspace\.com/[^/]+/
(?P<mediatype>
video/[^/]+/(?P<video_id>\d+)|
music/song/[^/?#&]+-(?P<song_id>\d+)-\d+(?:[/?#&]|$)
)
'''
_TESTS = [{
'url': 'https://myspace.com/fiveminutestothestage/video/little-big-town/109594919',
'md5': '9c1483c106f4a695c47d2911feed50a7',
'info_dict': {
'id': '109594919',
'ext': 'mp4',
'title': 'Little Big Town',
'description': 'This country quartet was all smiles while playing a sold out show at the Pacific Amphitheatre in Orange County, California.',
'uploader': 'Five Minutes to the Stage',
'uploader_id': 'fiveminutestothestage',
'timestamp': 1414108751,
'upload_date': '20141023',
},
}, {
# songs
'url': 'https://myspace.com/killsorrow/music/song/of-weakened-soul...-93388656-103880681',
'md5': '1d7ee4604a3da226dd69a123f748b262',
'info_dict': {
'id': '93388656',
'ext': 'm4a',
'title': 'Of weakened soul...',
'uploader': 'Killsorrow',
'uploader_id': 'killsorrow',
},
}, {
'add_ie': ['Youtube'],
'url': 'https://myspace.com/threedaysgrace/music/song/animal-i-have-become-28400208-28218041',
'info_dict': {
'id': 'xqds0B_meys',
'ext': 'webm',
'title': 'Three Days Grace - Animal I Have Become',
'description': 'md5:8bd86b3693e72a077cf863a8530c54bb',
'uploader': 'ThreeDaysGraceVEVO',
'uploader_id': 'ThreeDaysGraceVEVO',
'upload_date': '20091002',
},
}, {
'url': 'https://myspace.com/starset2/music/song/first-light-95799905-106964426',
'only_matching': True,
}, {
'url': 'https://myspace.com/thelargemouthbassband/music/song/02-pure-eyes.mp3-94422330-105113388',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('video_id') or mobj.group('song_id')
is_song = mobj.group('mediatype').startswith('music/song')
webpage = self._download_webpage(url, video_id)
player_url = self._search_regex(
r'videoSwf":"([^"?]*)', webpage, 'player URL', fatal=False)
def formats_from_stream_urls(stream_url, hls_stream_url, http_stream_url, width=None, height=None):
formats = []
vcodec = 'none' if is_song else None
if hls_stream_url:
formats.append({
'format_id': 'hls',
'url': hls_stream_url,
'protocol': 'm3u8_native',
'ext': 'm4a' if is_song else 'mp4',
'vcodec': vcodec,
})
if stream_url and player_url:
rtmp_url, play_path = stream_url.split(';', 1)
formats.append({
'format_id': 'rtmp',
'url': rtmp_url,
'play_path': play_path,
'player_url': player_url,
'protocol': 'rtmp',
'ext': 'flv',
'width': width,
'height': height,
'vcodec': vcodec,
})
if http_stream_url:
formats.append({
'format_id': 'http',
'url': http_stream_url,
'width': width,
'height': height,
'vcodec': vcodec,
})
return formats
if is_song:
# songs don't store any useful info in the 'context' variable
song_data = self._search_regex(
r'''<button.*data-song-id=(["\'])%s\1.*''' % video_id,
webpage, 'song_data', default=None, group=0)
if song_data is None:
# some songs in an album are not playable
self.report_warning(
'%s: No downloadable song on this page' % video_id)
return
def search_data(name):
return self._search_regex(
r'''data-%s=([\'"])(?P<data>.*?)\1''' % name,
song_data, name, default='', group='data')
formats = formats_from_stream_urls(
search_data('stream-url'), search_data('hls-stream-url'),
search_data('http-stream-url'))
if not formats:
vevo_id = search_data('vevo-id')
youtube_id = search_data('youtube-id')
if vevo_id:
self.to_screen('Vevo video detected: %s' % vevo_id)
return self.url_result('vevo:%s' % vevo_id, ie='Vevo')
elif youtube_id:
self.to_screen('Youtube video detected: %s' % youtube_id)
return self.url_result(youtube_id, ie='Youtube')
else:
raise ExtractorError(
'Found song but don\'t know how to download it')
self._sort_formats(formats)
return {
'id': video_id,
'title': self._og_search_title(webpage),
'uploader': search_data('artist-name'),
'uploader_id': search_data('artist-username'),
'thumbnail': self._og_search_thumbnail(webpage),
'duration': int_or_none(search_data('duration')),
'formats': formats,
}
else:
video = self._parse_json(self._search_regex(
r'context = ({.*?});', webpage, 'context'),
video_id)['video']
formats = formats_from_stream_urls(
video.get('streamUrl'), video.get('hlsStreamUrl'),
video.get('mp4StreamUrl'), int_or_none(video.get('width')),
int_or_none(video.get('height')))
self._sort_formats(formats)
return {
'id': video_id,
'title': video['title'],
'description': video.get('description'),
'thumbnail': video.get('imageUrl'),
'uploader': video.get('artistName'),
'uploader_id': video.get('artistUsername'),
'duration': int_or_none(video.get('duration')),
'timestamp': parse_iso8601(video.get('dateAdded')),
'formats': formats,
}
class MySpaceAlbumIE(InfoExtractor):
IE_NAME = 'MySpace:album'
_VALID_URL = r'https?://myspace\.com/([^/]+)/music/album/(?P<title>.*-)(?P<id>\d+)'
_TESTS = [{
'url': 'https://myspace.com/starset2/music/album/transmissions-19455773',
'info_dict': {
'title': 'Transmissions',
'id': '19455773',
},
'playlist_count': 14,
'skip': 'this album is only available in some countries',
}, {
'url': 'https://myspace.com/killsorrow/music/album/the-demo-18596029',
'info_dict': {
'title': 'The Demo',
'id': '18596029',
},
'playlist_count': 5,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('id')
display_id = mobj.group('title') + playlist_id
webpage = self._download_webpage(url, display_id)
tracks_paths = re.findall(r'"music:song" content="(.*?)"', webpage)
if not tracks_paths:
raise ExtractorError(
'%s: No songs found, try using proxy' % display_id,
expected=True)
entries = [
self.url_result(t_path, ie=MySpaceIE.ie_key())
for t_path in tracks_paths]
return {
'_type': 'playlist',
'id': playlist_id,
'display_id': display_id,
'title': self._og_search_title(webpage),
'entries': entries,
}
license: unlicense | hash: 524f54e8b7ffd9b61de9e69cd2e8a8ef | line_mean: 38.660377 | line_max: 153 | alpha_frac: 0.483349 | ratio: 3.787387 | autogenerated: false | config_or_test: false | has_no_keywords: false | has_few_assignments: false
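The song branch of MySpaceIE above reads everything from `data-*` attributes of a `<button>` element. A standalone sketch of that lookup; the markup is invented, but the regular expression mirrors the one used by the extractor's `search_data` helper:

```python
import re

# Invented markup shaped like the <button> tag the song branch parses.
song_data = ('<button data-song-id="93388656" data-stream-url="rtmp://host/app;path" '
             'data-duration="245">')

def search_data(name):
    # Same data-<name>=["']...["'] pattern as MySpaceIE's search_data helper above.
    m = re.search(r'data-%s=([\'"])(?P<data>.*?)\1' % name, song_data)
    return m.group('data') if m else ''

print(search_data('stream-url'))  # -> rtmp://host/app;path
print(search_data('duration'))    # -> 245
```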
repo_name: rbrito/pkg-youtube-dl | path: youtube_dl/utils.py | copies: 1 | size: 168589
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
import base64
import binascii
import calendar
import codecs
import collections
import contextlib
import ctypes
import datetime
import email.utils
import email.header
import errno
import functools
import gzip
import io
import itertools
import json
import locale
import math
import operator
import os
import platform
import random
import re
import socket
import ssl
import subprocess
import sys
import tempfile
import time
import traceback
import xml.etree.ElementTree
import zlib
from .compat import (
compat_HTMLParseError,
compat_HTMLParser,
compat_basestring,
compat_chr,
compat_cookiejar,
compat_ctypes_WINFUNCTYPE,
compat_etree_fromstring,
compat_expanduser,
compat_html_entities,
compat_html_entities_html5,
compat_http_client,
compat_integer_types,
compat_kwargs,
compat_os_name,
compat_parse_qs,
compat_shlex_quote,
compat_str,
compat_struct_pack,
compat_struct_unpack,
compat_urllib_error,
compat_urllib_parse,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urllib_parse_unquote_plus,
compat_urllib_request,
compat_urlparse,
compat_xpath,
)
from .socks import (
ProxyType,
sockssocket,
)
def register_socks_protocols():
# "Register" SOCKS protocols
# In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
# URLs with protocols not in urlparse.uses_netloc are not handled correctly
for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
if scheme not in compat_urlparse.uses_netloc:
compat_urlparse.uses_netloc.append(scheme)
# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))
def random_user_agent():
_USER_AGENT_TPL = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36'
_CHROME_VERSIONS = (
'74.0.3729.129',
'76.0.3780.3',
'76.0.3780.2',
'74.0.3729.128',
'76.0.3780.1',
'76.0.3780.0',
'75.0.3770.15',
'74.0.3729.127',
'74.0.3729.126',
'76.0.3779.1',
'76.0.3779.0',
'75.0.3770.14',
'74.0.3729.125',
'76.0.3778.1',
'76.0.3778.0',
'75.0.3770.13',
'74.0.3729.124',
'74.0.3729.123',
'73.0.3683.121',
'76.0.3777.1',
'76.0.3777.0',
'75.0.3770.12',
'74.0.3729.122',
'76.0.3776.4',
'75.0.3770.11',
'74.0.3729.121',
'76.0.3776.3',
'76.0.3776.2',
'73.0.3683.120',
'74.0.3729.120',
'74.0.3729.119',
'74.0.3729.118',
'76.0.3776.1',
'76.0.3776.0',
'76.0.3775.5',
'75.0.3770.10',
'74.0.3729.117',
'76.0.3775.4',
'76.0.3775.3',
'74.0.3729.116',
'75.0.3770.9',
'76.0.3775.2',
'76.0.3775.1',
'76.0.3775.0',
'75.0.3770.8',
'74.0.3729.115',
'74.0.3729.114',
'76.0.3774.1',
'76.0.3774.0',
'75.0.3770.7',
'74.0.3729.113',
'74.0.3729.112',
'74.0.3729.111',
'76.0.3773.1',
'76.0.3773.0',
'75.0.3770.6',
'74.0.3729.110',
'74.0.3729.109',
'76.0.3772.1',
'76.0.3772.0',
'75.0.3770.5',
'74.0.3729.108',
'74.0.3729.107',
'76.0.3771.1',
'76.0.3771.0',
'75.0.3770.4',
'74.0.3729.106',
'74.0.3729.105',
'75.0.3770.3',
'74.0.3729.104',
'74.0.3729.103',
'74.0.3729.102',
'75.0.3770.2',
'74.0.3729.101',
'75.0.3770.1',
'75.0.3770.0',
'74.0.3729.100',
'75.0.3769.5',
'75.0.3769.4',
'74.0.3729.99',
'75.0.3769.3',
'75.0.3769.2',
'75.0.3768.6',
'74.0.3729.98',
'75.0.3769.1',
'75.0.3769.0',
'74.0.3729.97',
'73.0.3683.119',
'73.0.3683.118',
'74.0.3729.96',
'75.0.3768.5',
'75.0.3768.4',
'75.0.3768.3',
'75.0.3768.2',
'74.0.3729.95',
'74.0.3729.94',
'75.0.3768.1',
'75.0.3768.0',
'74.0.3729.93',
'74.0.3729.92',
'73.0.3683.117',
'74.0.3729.91',
'75.0.3766.3',
'74.0.3729.90',
'75.0.3767.2',
'75.0.3767.1',
'75.0.3767.0',
'74.0.3729.89',
'73.0.3683.116',
'75.0.3766.2',
'74.0.3729.88',
'75.0.3766.1',
'75.0.3766.0',
'74.0.3729.87',
'73.0.3683.115',
'74.0.3729.86',
'75.0.3765.1',
'75.0.3765.0',
'74.0.3729.85',
'73.0.3683.114',
'74.0.3729.84',
'75.0.3764.1',
'75.0.3764.0',
'74.0.3729.83',
'73.0.3683.113',
'75.0.3763.2',
'75.0.3761.4',
'74.0.3729.82',
'75.0.3763.1',
'75.0.3763.0',
'74.0.3729.81',
'73.0.3683.112',
'75.0.3762.1',
'75.0.3762.0',
'74.0.3729.80',
'75.0.3761.3',
'74.0.3729.79',
'73.0.3683.111',
'75.0.3761.2',
'74.0.3729.78',
'74.0.3729.77',
'75.0.3761.1',
'75.0.3761.0',
'73.0.3683.110',
'74.0.3729.76',
'74.0.3729.75',
'75.0.3760.0',
'74.0.3729.74',
'75.0.3759.8',
'75.0.3759.7',
'75.0.3759.6',
'74.0.3729.73',
'75.0.3759.5',
'74.0.3729.72',
'73.0.3683.109',
'75.0.3759.4',
'75.0.3759.3',
'74.0.3729.71',
'75.0.3759.2',
'74.0.3729.70',
'73.0.3683.108',
'74.0.3729.69',
'75.0.3759.1',
'75.0.3759.0',
'74.0.3729.68',
'73.0.3683.107',
'74.0.3729.67',
'75.0.3758.1',
'75.0.3758.0',
'74.0.3729.66',
'73.0.3683.106',
'74.0.3729.65',
'75.0.3757.1',
'75.0.3757.0',
'74.0.3729.64',
'73.0.3683.105',
'74.0.3729.63',
'75.0.3756.1',
'75.0.3756.0',
'74.0.3729.62',
'73.0.3683.104',
'75.0.3755.3',
'75.0.3755.2',
'73.0.3683.103',
'75.0.3755.1',
'75.0.3755.0',
'74.0.3729.61',
'73.0.3683.102',
'74.0.3729.60',
'75.0.3754.2',
'74.0.3729.59',
'75.0.3753.4',
'74.0.3729.58',
'75.0.3754.1',
'75.0.3754.0',
'74.0.3729.57',
'73.0.3683.101',
'75.0.3753.3',
'75.0.3752.2',
'75.0.3753.2',
'74.0.3729.56',
'75.0.3753.1',
'75.0.3753.0',
'74.0.3729.55',
'73.0.3683.100',
'74.0.3729.54',
'75.0.3752.1',
'75.0.3752.0',
'74.0.3729.53',
'73.0.3683.99',
'74.0.3729.52',
'75.0.3751.1',
'75.0.3751.0',
'74.0.3729.51',
'73.0.3683.98',
'74.0.3729.50',
'75.0.3750.0',
'74.0.3729.49',
'74.0.3729.48',
'74.0.3729.47',
'75.0.3749.3',
'74.0.3729.46',
'73.0.3683.97',
'75.0.3749.2',
'74.0.3729.45',
'75.0.3749.1',
'75.0.3749.0',
'74.0.3729.44',
'73.0.3683.96',
'74.0.3729.43',
'74.0.3729.42',
'75.0.3748.1',
'75.0.3748.0',
'74.0.3729.41',
'75.0.3747.1',
'73.0.3683.95',
'75.0.3746.4',
'74.0.3729.40',
'74.0.3729.39',
'75.0.3747.0',
'75.0.3746.3',
'75.0.3746.2',
'74.0.3729.38',
'75.0.3746.1',
'75.0.3746.0',
'74.0.3729.37',
'73.0.3683.94',
'75.0.3745.5',
'75.0.3745.4',
'75.0.3745.3',
'75.0.3745.2',
'74.0.3729.36',
'75.0.3745.1',
'75.0.3745.0',
'75.0.3744.2',
'74.0.3729.35',
'73.0.3683.93',
'74.0.3729.34',
'75.0.3744.1',
'75.0.3744.0',
'74.0.3729.33',
'73.0.3683.92',
'74.0.3729.32',
'74.0.3729.31',
'73.0.3683.91',
'75.0.3741.2',
'75.0.3740.5',
'74.0.3729.30',
'75.0.3741.1',
'75.0.3741.0',
'74.0.3729.29',
'75.0.3740.4',
'73.0.3683.90',
'74.0.3729.28',
'75.0.3740.3',
'73.0.3683.89',
'75.0.3740.2',
'74.0.3729.27',
'75.0.3740.1',
'75.0.3740.0',
'74.0.3729.26',
'73.0.3683.88',
'73.0.3683.87',
'74.0.3729.25',
'75.0.3739.1',
'75.0.3739.0',
'73.0.3683.86',
'74.0.3729.24',
'73.0.3683.85',
'75.0.3738.4',
'75.0.3738.3',
'75.0.3738.2',
'75.0.3738.1',
'75.0.3738.0',
'74.0.3729.23',
'73.0.3683.84',
'74.0.3729.22',
'74.0.3729.21',
'75.0.3737.1',
'75.0.3737.0',
'74.0.3729.20',
'73.0.3683.83',
'74.0.3729.19',
'75.0.3736.1',
'75.0.3736.0',
'74.0.3729.18',
'73.0.3683.82',
'74.0.3729.17',
'75.0.3735.1',
'75.0.3735.0',
'74.0.3729.16',
'73.0.3683.81',
'75.0.3734.1',
'75.0.3734.0',
'74.0.3729.15',
'73.0.3683.80',
'74.0.3729.14',
'75.0.3733.1',
'75.0.3733.0',
'75.0.3732.1',
'74.0.3729.13',
'74.0.3729.12',
'73.0.3683.79',
'74.0.3729.11',
'75.0.3732.0',
'74.0.3729.10',
'73.0.3683.78',
'74.0.3729.9',
'74.0.3729.8',
'74.0.3729.7',
'75.0.3731.3',
'75.0.3731.2',
'75.0.3731.0',
'74.0.3729.6',
'73.0.3683.77',
'73.0.3683.76',
'75.0.3730.5',
'75.0.3730.4',
'73.0.3683.75',
'74.0.3729.5',
'73.0.3683.74',
'75.0.3730.3',
'75.0.3730.2',
'74.0.3729.4',
'73.0.3683.73',
'73.0.3683.72',
'75.0.3730.1',
'75.0.3730.0',
'74.0.3729.3',
'73.0.3683.71',
'74.0.3729.2',
'73.0.3683.70',
'74.0.3729.1',
'74.0.3729.0',
'74.0.3726.4',
'73.0.3683.69',
'74.0.3726.3',
'74.0.3728.0',
'74.0.3726.2',
'73.0.3683.68',
'74.0.3726.1',
'74.0.3726.0',
'74.0.3725.4',
'73.0.3683.67',
'73.0.3683.66',
'74.0.3725.3',
'74.0.3725.2',
'74.0.3725.1',
'74.0.3724.8',
'74.0.3725.0',
'73.0.3683.65',
'74.0.3724.7',
'74.0.3724.6',
'74.0.3724.5',
'74.0.3724.4',
'74.0.3724.3',
'74.0.3724.2',
'74.0.3724.1',
'74.0.3724.0',
'73.0.3683.64',
'74.0.3723.1',
'74.0.3723.0',
'73.0.3683.63',
'74.0.3722.1',
'74.0.3722.0',
'73.0.3683.62',
'74.0.3718.9',
'74.0.3702.3',
'74.0.3721.3',
'74.0.3721.2',
'74.0.3721.1',
'74.0.3721.0',
'74.0.3720.6',
'73.0.3683.61',
'72.0.3626.122',
'73.0.3683.60',
'74.0.3720.5',
'72.0.3626.121',
'74.0.3718.8',
'74.0.3720.4',
'74.0.3720.3',
'74.0.3718.7',
'74.0.3720.2',
'74.0.3720.1',
'74.0.3720.0',
'74.0.3718.6',
'74.0.3719.5',
'73.0.3683.59',
'74.0.3718.5',
'74.0.3718.4',
'74.0.3719.4',
'74.0.3719.3',
'74.0.3719.2',
'74.0.3719.1',
'73.0.3683.58',
'74.0.3719.0',
'73.0.3683.57',
'73.0.3683.56',
'74.0.3718.3',
'73.0.3683.55',
'74.0.3718.2',
'74.0.3718.1',
'74.0.3718.0',
'73.0.3683.54',
'74.0.3717.2',
'73.0.3683.53',
'74.0.3717.1',
'74.0.3717.0',
'73.0.3683.52',
'74.0.3716.1',
'74.0.3716.0',
'73.0.3683.51',
'74.0.3715.1',
'74.0.3715.0',
'73.0.3683.50',
'74.0.3711.2',
'74.0.3714.2',
'74.0.3713.3',
'74.0.3714.1',
'74.0.3714.0',
'73.0.3683.49',
'74.0.3713.1',
'74.0.3713.0',
'72.0.3626.120',
'73.0.3683.48',
'74.0.3712.2',
'74.0.3712.1',
'74.0.3712.0',
'73.0.3683.47',
'72.0.3626.119',
'73.0.3683.46',
'74.0.3710.2',
'72.0.3626.118',
'74.0.3711.1',
'74.0.3711.0',
'73.0.3683.45',
'72.0.3626.117',
'74.0.3710.1',
'74.0.3710.0',
'73.0.3683.44',
'72.0.3626.116',
'74.0.3709.1',
'74.0.3709.0',
'74.0.3704.9',
'73.0.3683.43',
'72.0.3626.115',
'74.0.3704.8',
'74.0.3704.7',
'74.0.3708.0',
'74.0.3706.7',
'74.0.3704.6',
'73.0.3683.42',
'72.0.3626.114',
'74.0.3706.6',
'72.0.3626.113',
'74.0.3704.5',
'74.0.3706.5',
'74.0.3706.4',
'74.0.3706.3',
'74.0.3706.2',
'74.0.3706.1',
'74.0.3706.0',
'73.0.3683.41',
'72.0.3626.112',
'74.0.3705.1',
'74.0.3705.0',
'73.0.3683.40',
'72.0.3626.111',
'73.0.3683.39',
'74.0.3704.4',
'73.0.3683.38',
'74.0.3704.3',
'74.0.3704.2',
'74.0.3704.1',
'74.0.3704.0',
'73.0.3683.37',
'72.0.3626.110',
'72.0.3626.109',
'74.0.3703.3',
'74.0.3703.2',
'73.0.3683.36',
'74.0.3703.1',
'74.0.3703.0',
'73.0.3683.35',
'72.0.3626.108',
'74.0.3702.2',
'74.0.3699.3',
'74.0.3702.1',
'74.0.3702.0',
'73.0.3683.34',
'72.0.3626.107',
'73.0.3683.33',
'74.0.3701.1',
'74.0.3701.0',
'73.0.3683.32',
'73.0.3683.31',
'72.0.3626.105',
'74.0.3700.1',
'74.0.3700.0',
'73.0.3683.29',
'72.0.3626.103',
'74.0.3699.2',
'74.0.3699.1',
'74.0.3699.0',
'73.0.3683.28',
'72.0.3626.102',
'73.0.3683.27',
'73.0.3683.26',
'74.0.3698.0',
'74.0.3696.2',
'72.0.3626.101',
'73.0.3683.25',
'74.0.3696.1',
'74.0.3696.0',
'74.0.3694.8',
'72.0.3626.100',
'74.0.3694.7',
'74.0.3694.6',
'74.0.3694.5',
'74.0.3694.4',
'72.0.3626.99',
'72.0.3626.98',
'74.0.3694.3',
'73.0.3683.24',
'72.0.3626.97',
'72.0.3626.96',
'72.0.3626.95',
'73.0.3683.23',
'72.0.3626.94',
'73.0.3683.22',
'73.0.3683.21',
'72.0.3626.93',
'74.0.3694.2',
'72.0.3626.92',
'74.0.3694.1',
'74.0.3694.0',
'74.0.3693.6',
'73.0.3683.20',
'72.0.3626.91',
'74.0.3693.5',
'74.0.3693.4',
'74.0.3693.3',
'74.0.3693.2',
'73.0.3683.19',
'74.0.3693.1',
'74.0.3693.0',
'73.0.3683.18',
'72.0.3626.90',
'74.0.3692.1',
'74.0.3692.0',
'73.0.3683.17',
'72.0.3626.89',
'74.0.3687.3',
'74.0.3691.1',
'74.0.3691.0',
'73.0.3683.16',
'72.0.3626.88',
'72.0.3626.87',
'73.0.3683.15',
'74.0.3690.1',
'74.0.3690.0',
'73.0.3683.14',
'72.0.3626.86',
'73.0.3683.13',
'73.0.3683.12',
'74.0.3689.1',
'74.0.3689.0',
'73.0.3683.11',
'72.0.3626.85',
'73.0.3683.10',
'72.0.3626.84',
'73.0.3683.9',
'74.0.3688.1',
'74.0.3688.0',
'73.0.3683.8',
'72.0.3626.83',
'74.0.3687.2',
'74.0.3687.1',
'74.0.3687.0',
'73.0.3683.7',
'72.0.3626.82',
'74.0.3686.4',
'72.0.3626.81',
'74.0.3686.3',
'74.0.3686.2',
'74.0.3686.1',
'74.0.3686.0',
'73.0.3683.6',
'72.0.3626.80',
'74.0.3685.1',
'74.0.3685.0',
'73.0.3683.5',
'72.0.3626.79',
'74.0.3684.1',
'74.0.3684.0',
'73.0.3683.4',
'72.0.3626.78',
'72.0.3626.77',
'73.0.3683.3',
'73.0.3683.2',
'72.0.3626.76',
'73.0.3683.1',
'73.0.3683.0',
'72.0.3626.75',
'71.0.3578.141',
'73.0.3682.1',
'73.0.3682.0',
'72.0.3626.74',
'71.0.3578.140',
'73.0.3681.4',
'73.0.3681.3',
'73.0.3681.2',
'73.0.3681.1',
'73.0.3681.0',
'72.0.3626.73',
'71.0.3578.139',
'72.0.3626.72',
'72.0.3626.71',
'73.0.3680.1',
'73.0.3680.0',
'72.0.3626.70',
'71.0.3578.138',
'73.0.3678.2',
'73.0.3679.1',
'73.0.3679.0',
'72.0.3626.69',
'71.0.3578.137',
'73.0.3678.1',
'73.0.3678.0',
'71.0.3578.136',
'73.0.3677.1',
'73.0.3677.0',
'72.0.3626.68',
'72.0.3626.67',
'71.0.3578.135',
'73.0.3676.1',
'73.0.3676.0',
'73.0.3674.2',
'72.0.3626.66',
'71.0.3578.134',
'73.0.3674.1',
'73.0.3674.0',
'72.0.3626.65',
'71.0.3578.133',
'73.0.3673.2',
'73.0.3673.1',
'73.0.3673.0',
'72.0.3626.64',
'71.0.3578.132',
'72.0.3626.63',
'72.0.3626.62',
'72.0.3626.61',
'72.0.3626.60',
'73.0.3672.1',
'73.0.3672.0',
'72.0.3626.59',
'71.0.3578.131',
'73.0.3671.3',
'73.0.3671.2',
'73.0.3671.1',
'73.0.3671.0',
'72.0.3626.58',
'71.0.3578.130',
'73.0.3670.1',
'73.0.3670.0',
'72.0.3626.57',
'71.0.3578.129',
'73.0.3669.1',
'73.0.3669.0',
'72.0.3626.56',
'71.0.3578.128',
'73.0.3668.2',
'73.0.3668.1',
'73.0.3668.0',
'72.0.3626.55',
'71.0.3578.127',
'73.0.3667.2',
'73.0.3667.1',
'73.0.3667.0',
'72.0.3626.54',
'71.0.3578.126',
'73.0.3666.1',
'73.0.3666.0',
'72.0.3626.53',
'71.0.3578.125',
'73.0.3665.4',
'73.0.3665.3',
'72.0.3626.52',
'73.0.3665.2',
'73.0.3664.4',
'73.0.3665.1',
'73.0.3665.0',
'72.0.3626.51',
'71.0.3578.124',
'72.0.3626.50',
'73.0.3664.3',
'73.0.3664.2',
'73.0.3664.1',
'73.0.3664.0',
'73.0.3663.2',
'72.0.3626.49',
'71.0.3578.123',
'73.0.3663.1',
'73.0.3663.0',
'72.0.3626.48',
'71.0.3578.122',
'73.0.3662.1',
'73.0.3662.0',
'72.0.3626.47',
'71.0.3578.121',
'73.0.3661.1',
'72.0.3626.46',
'73.0.3661.0',
'72.0.3626.45',
'71.0.3578.120',
'73.0.3660.2',
'73.0.3660.1',
'73.0.3660.0',
'72.0.3626.44',
'71.0.3578.119',
'73.0.3659.1',
'73.0.3659.0',
'72.0.3626.43',
'71.0.3578.118',
'73.0.3658.1',
'73.0.3658.0',
'72.0.3626.42',
'71.0.3578.117',
'73.0.3657.1',
'73.0.3657.0',
'72.0.3626.41',
'71.0.3578.116',
'73.0.3656.1',
'73.0.3656.0',
'72.0.3626.40',
'71.0.3578.115',
'73.0.3655.1',
'73.0.3655.0',
'72.0.3626.39',
'71.0.3578.114',
'73.0.3654.1',
'73.0.3654.0',
'72.0.3626.38',
'71.0.3578.113',
'73.0.3653.1',
'73.0.3653.0',
'72.0.3626.37',
'71.0.3578.112',
'73.0.3652.1',
'73.0.3652.0',
'72.0.3626.36',
'71.0.3578.111',
'73.0.3651.1',
'73.0.3651.0',
'72.0.3626.35',
'71.0.3578.110',
'73.0.3650.1',
'73.0.3650.0',
'72.0.3626.34',
'71.0.3578.109',
'73.0.3649.1',
'73.0.3649.0',
'72.0.3626.33',
'71.0.3578.108',
'73.0.3648.2',
'73.0.3648.1',
'73.0.3648.0',
'72.0.3626.32',
'71.0.3578.107',
'73.0.3647.2',
'73.0.3647.1',
'73.0.3647.0',
'72.0.3626.31',
'71.0.3578.106',
'73.0.3635.3',
'73.0.3646.2',
'73.0.3646.1',
'73.0.3646.0',
'72.0.3626.30',
'71.0.3578.105',
'72.0.3626.29',
'73.0.3645.2',
'73.0.3645.1',
'73.0.3645.0',
'72.0.3626.28',
'71.0.3578.104',
'72.0.3626.27',
'72.0.3626.26',
'72.0.3626.25',
'72.0.3626.24',
'73.0.3644.0',
'73.0.3643.2',
'72.0.3626.23',
'71.0.3578.103',
'73.0.3643.1',
'73.0.3643.0',
'72.0.3626.22',
'71.0.3578.102',
'73.0.3642.1',
'73.0.3642.0',
'72.0.3626.21',
'71.0.3578.101',
'73.0.3641.1',
'73.0.3641.0',
'72.0.3626.20',
'71.0.3578.100',
'72.0.3626.19',
'73.0.3640.1',
'73.0.3640.0',
'72.0.3626.18',
'73.0.3639.1',
'71.0.3578.99',
'73.0.3639.0',
'72.0.3626.17',
'73.0.3638.2',
'72.0.3626.16',
'73.0.3638.1',
'73.0.3638.0',
'72.0.3626.15',
'71.0.3578.98',
'73.0.3635.2',
'71.0.3578.97',
'73.0.3637.1',
'73.0.3637.0',
'72.0.3626.14',
'71.0.3578.96',
'71.0.3578.95',
'72.0.3626.13',
'71.0.3578.94',
'73.0.3636.2',
'71.0.3578.93',
'73.0.3636.1',
'73.0.3636.0',
'72.0.3626.12',
'71.0.3578.92',
'73.0.3635.1',
'73.0.3635.0',
'72.0.3626.11',
'71.0.3578.91',
'73.0.3634.2',
'73.0.3634.1',
'73.0.3634.0',
'72.0.3626.10',
'71.0.3578.90',
'71.0.3578.89',
'73.0.3633.2',
'73.0.3633.1',
'73.0.3633.0',
'72.0.3610.4',
'72.0.3626.9',
'71.0.3578.88',
'73.0.3632.5',
'73.0.3632.4',
'73.0.3632.3',
'73.0.3632.2',
'73.0.3632.1',
'73.0.3632.0',
'72.0.3626.8',
'71.0.3578.87',
'73.0.3631.2',
'73.0.3631.1',
'73.0.3631.0',
'72.0.3626.7',
'71.0.3578.86',
'72.0.3626.6',
'73.0.3630.1',
'73.0.3630.0',
'72.0.3626.5',
'71.0.3578.85',
'72.0.3626.4',
'73.0.3628.3',
'73.0.3628.2',
'73.0.3629.1',
'73.0.3629.0',
'72.0.3626.3',
'71.0.3578.84',
'73.0.3628.1',
'73.0.3628.0',
'71.0.3578.83',
'73.0.3627.1',
'73.0.3627.0',
'72.0.3626.2',
'71.0.3578.82',
'71.0.3578.81',
'71.0.3578.80',
'72.0.3626.1',
'72.0.3626.0',
'71.0.3578.79',
'70.0.3538.124',
'71.0.3578.78',
'72.0.3623.4',
'72.0.3625.2',
'72.0.3625.1',
'72.0.3625.0',
'71.0.3578.77',
'70.0.3538.123',
'72.0.3624.4',
'72.0.3624.3',
'72.0.3624.2',
'71.0.3578.76',
'72.0.3624.1',
'72.0.3624.0',
'72.0.3623.3',
'71.0.3578.75',
'70.0.3538.122',
'71.0.3578.74',
'72.0.3623.2',
'72.0.3610.3',
'72.0.3623.1',
'72.0.3623.0',
'72.0.3622.3',
'72.0.3622.2',
'71.0.3578.73',
'70.0.3538.121',
'72.0.3622.1',
'72.0.3622.0',
'71.0.3578.72',
'70.0.3538.120',
'72.0.3621.1',
'72.0.3621.0',
'71.0.3578.71',
'70.0.3538.119',
'72.0.3620.1',
'72.0.3620.0',
'71.0.3578.70',
'70.0.3538.118',
'71.0.3578.69',
'72.0.3619.1',
'72.0.3619.0',
'71.0.3578.68',
'70.0.3538.117',
'71.0.3578.67',
'72.0.3618.1',
'72.0.3618.0',
'71.0.3578.66',
'70.0.3538.116',
'72.0.3617.1',
'72.0.3617.0',
'71.0.3578.65',
'70.0.3538.115',
'72.0.3602.3',
'71.0.3578.64',
'72.0.3616.1',
'72.0.3616.0',
'71.0.3578.63',
'70.0.3538.114',
'71.0.3578.62',
'72.0.3615.1',
'72.0.3615.0',
'71.0.3578.61',
'70.0.3538.113',
'72.0.3614.1',
'72.0.3614.0',
'71.0.3578.60',
'70.0.3538.112',
'72.0.3613.1',
'72.0.3613.0',
'71.0.3578.59',
'70.0.3538.111',
'72.0.3612.2',
'72.0.3612.1',
'72.0.3612.0',
'70.0.3538.110',
'71.0.3578.58',
'70.0.3538.109',
'72.0.3611.2',
'72.0.3611.1',
'72.0.3611.0',
'71.0.3578.57',
'70.0.3538.108',
'72.0.3610.2',
'71.0.3578.56',
'71.0.3578.55',
'72.0.3610.1',
'72.0.3610.0',
'71.0.3578.54',
'70.0.3538.107',
'71.0.3578.53',
'72.0.3609.3',
'71.0.3578.52',
'72.0.3609.2',
'71.0.3578.51',
'72.0.3608.5',
'72.0.3609.1',
'72.0.3609.0',
'71.0.3578.50',
'70.0.3538.106',
'72.0.3608.4',
'72.0.3608.3',
'72.0.3608.2',
'71.0.3578.49',
'72.0.3608.1',
'72.0.3608.0',
'70.0.3538.105',
'71.0.3578.48',
'72.0.3607.1',
'72.0.3607.0',
'71.0.3578.47',
'70.0.3538.104',
'72.0.3606.2',
'72.0.3606.1',
'72.0.3606.0',
'71.0.3578.46',
'70.0.3538.103',
'70.0.3538.102',
'72.0.3605.3',
'72.0.3605.2',
'72.0.3605.1',
'72.0.3605.0',
'71.0.3578.45',
'70.0.3538.101',
'71.0.3578.44',
'71.0.3578.43',
'70.0.3538.100',
'70.0.3538.99',
'71.0.3578.42',
'72.0.3604.1',
'72.0.3604.0',
'71.0.3578.41',
'70.0.3538.98',
'71.0.3578.40',
'72.0.3603.2',
'72.0.3603.1',
'72.0.3603.0',
'71.0.3578.39',
'70.0.3538.97',
'72.0.3602.2',
'71.0.3578.38',
'71.0.3578.37',
'72.0.3602.1',
'72.0.3602.0',
'71.0.3578.36',
'70.0.3538.96',
'72.0.3601.1',
'72.0.3601.0',
'71.0.3578.35',
'70.0.3538.95',
'72.0.3600.1',
'72.0.3600.0',
'71.0.3578.34',
'70.0.3538.94',
'72.0.3599.3',
'72.0.3599.2',
'72.0.3599.1',
'72.0.3599.0',
'71.0.3578.33',
'70.0.3538.93',
'72.0.3598.1',
'72.0.3598.0',
'71.0.3578.32',
'70.0.3538.87',
'72.0.3597.1',
'72.0.3597.0',
'72.0.3596.2',
'71.0.3578.31',
'70.0.3538.86',
'71.0.3578.30',
'71.0.3578.29',
'72.0.3596.1',
'72.0.3596.0',
'71.0.3578.28',
'70.0.3538.85',
'72.0.3595.2',
'72.0.3591.3',
'72.0.3595.1',
'72.0.3595.0',
'71.0.3578.27',
'70.0.3538.84',
'72.0.3594.1',
'72.0.3594.0',
'71.0.3578.26',
'70.0.3538.83',
'72.0.3593.2',
'72.0.3593.1',
'72.0.3593.0',
'71.0.3578.25',
'70.0.3538.82',
'72.0.3589.3',
'72.0.3592.2',
'72.0.3592.1',
'72.0.3592.0',
'71.0.3578.24',
'72.0.3589.2',
'70.0.3538.81',
'70.0.3538.80',
'72.0.3591.2',
'72.0.3591.1',
'72.0.3591.0',
'71.0.3578.23',
'70.0.3538.79',
'71.0.3578.22',
'72.0.3590.1',
'72.0.3590.0',
'71.0.3578.21',
'70.0.3538.78',
'70.0.3538.77',
'72.0.3589.1',
'72.0.3589.0',
'71.0.3578.20',
'70.0.3538.76',
'71.0.3578.19',
'70.0.3538.75',
'72.0.3588.1',
'72.0.3588.0',
'71.0.3578.18',
'70.0.3538.74',
'72.0.3586.2',
'72.0.3587.0',
'71.0.3578.17',
'70.0.3538.73',
'72.0.3586.1',
'72.0.3586.0',
'71.0.3578.16',
'70.0.3538.72',
'72.0.3585.1',
'72.0.3585.0',
'71.0.3578.15',
'70.0.3538.71',
'71.0.3578.14',
'72.0.3584.1',
'72.0.3584.0',
'71.0.3578.13',
'70.0.3538.70',
'72.0.3583.2',
'71.0.3578.12',
'72.0.3583.1',
'72.0.3583.0',
'71.0.3578.11',
'70.0.3538.69',
'71.0.3578.10',
'72.0.3582.0',
'72.0.3581.4',
'71.0.3578.9',
'70.0.3538.67',
'72.0.3581.3',
'72.0.3581.2',
'72.0.3581.1',
'72.0.3581.0',
'71.0.3578.8',
'70.0.3538.66',
'72.0.3580.1',
'72.0.3580.0',
'71.0.3578.7',
'70.0.3538.65',
'71.0.3578.6',
'72.0.3579.1',
'72.0.3579.0',
'71.0.3578.5',
'70.0.3538.64',
'71.0.3578.4',
'71.0.3578.3',
'71.0.3578.2',
'71.0.3578.1',
'71.0.3578.0',
'70.0.3538.63',
'69.0.3497.128',
'70.0.3538.62',
'70.0.3538.61',
'70.0.3538.60',
'70.0.3538.59',
'71.0.3577.1',
'71.0.3577.0',
'70.0.3538.58',
'69.0.3497.127',
'71.0.3576.2',
'71.0.3576.1',
'71.0.3576.0',
'70.0.3538.57',
'70.0.3538.56',
'71.0.3575.2',
'70.0.3538.55',
'69.0.3497.126',
'70.0.3538.54',
'71.0.3575.1',
'71.0.3575.0',
'71.0.3574.1',
'71.0.3574.0',
'70.0.3538.53',
'69.0.3497.125',
'70.0.3538.52',
'71.0.3573.1',
'71.0.3573.0',
'70.0.3538.51',
'69.0.3497.124',
'71.0.3572.1',
'71.0.3572.0',
'70.0.3538.50',
'69.0.3497.123',
'71.0.3571.2',
'70.0.3538.49',
'69.0.3497.122',
'71.0.3571.1',
'71.0.3571.0',
'70.0.3538.48',
'69.0.3497.121',
'71.0.3570.1',
'71.0.3570.0',
'70.0.3538.47',
'69.0.3497.120',
'71.0.3568.2',
'71.0.3569.1',
'71.0.3569.0',
'70.0.3538.46',
'69.0.3497.119',
'70.0.3538.45',
'71.0.3568.1',
'71.0.3568.0',
'70.0.3538.44',
'69.0.3497.118',
'70.0.3538.43',
'70.0.3538.42',
'71.0.3567.1',
'71.0.3567.0',
'70.0.3538.41',
'69.0.3497.117',
'71.0.3566.1',
'71.0.3566.0',
'70.0.3538.40',
'69.0.3497.116',
'71.0.3565.1',
'71.0.3565.0',
'70.0.3538.39',
'69.0.3497.115',
'71.0.3564.1',
'71.0.3564.0',
'70.0.3538.38',
'69.0.3497.114',
'71.0.3563.0',
'71.0.3562.2',
'70.0.3538.37',
'69.0.3497.113',
'70.0.3538.36',
'70.0.3538.35',
'71.0.3562.1',
'71.0.3562.0',
'70.0.3538.34',
'69.0.3497.112',
'70.0.3538.33',
'71.0.3561.1',
'71.0.3561.0',
'70.0.3538.32',
'69.0.3497.111',
'71.0.3559.6',
'71.0.3560.1',
'71.0.3560.0',
'71.0.3559.5',
'71.0.3559.4',
'70.0.3538.31',
'69.0.3497.110',
'71.0.3559.3',
'70.0.3538.30',
'69.0.3497.109',
'71.0.3559.2',
'71.0.3559.1',
'71.0.3559.0',
'70.0.3538.29',
'69.0.3497.108',
'71.0.3558.2',
'71.0.3558.1',
'71.0.3558.0',
'70.0.3538.28',
'69.0.3497.107',
'71.0.3557.2',
'71.0.3557.1',
'71.0.3557.0',
'70.0.3538.27',
'69.0.3497.106',
'71.0.3554.4',
'70.0.3538.26',
'71.0.3556.1',
'71.0.3556.0',
'70.0.3538.25',
'71.0.3554.3',
'69.0.3497.105',
'71.0.3554.2',
'70.0.3538.24',
'69.0.3497.104',
'71.0.3555.2',
'70.0.3538.23',
'71.0.3555.1',
'71.0.3555.0',
'70.0.3538.22',
'69.0.3497.103',
'71.0.3554.1',
'71.0.3554.0',
'70.0.3538.21',
'69.0.3497.102',
'71.0.3553.3',
'70.0.3538.20',
'69.0.3497.101',
'71.0.3553.2',
'69.0.3497.100',
'71.0.3553.1',
'71.0.3553.0',
'70.0.3538.19',
'69.0.3497.99',
'69.0.3497.98',
'69.0.3497.97',
'71.0.3552.6',
'71.0.3552.5',
'71.0.3552.4',
'71.0.3552.3',
'71.0.3552.2',
'71.0.3552.1',
'71.0.3552.0',
'70.0.3538.18',
'69.0.3497.96',
'71.0.3551.3',
'71.0.3551.2',
'71.0.3551.1',
'71.0.3551.0',
'70.0.3538.17',
'69.0.3497.95',
'71.0.3550.3',
'71.0.3550.2',
'71.0.3550.1',
'71.0.3550.0',
'70.0.3538.16',
'69.0.3497.94',
'71.0.3549.1',
'71.0.3549.0',
'70.0.3538.15',
'69.0.3497.93',
'69.0.3497.92',
'71.0.3548.1',
'71.0.3548.0',
'70.0.3538.14',
'69.0.3497.91',
'71.0.3547.1',
'71.0.3547.0',
'70.0.3538.13',
'69.0.3497.90',
'71.0.3546.2',
'69.0.3497.89',
'71.0.3546.1',
'71.0.3546.0',
'70.0.3538.12',
'69.0.3497.88',
'71.0.3545.4',
'71.0.3545.3',
'71.0.3545.2',
'71.0.3545.1',
'71.0.3545.0',
'70.0.3538.11',
'69.0.3497.87',
'71.0.3544.5',
'71.0.3544.4',
'71.0.3544.3',
'71.0.3544.2',
'71.0.3544.1',
'71.0.3544.0',
'69.0.3497.86',
'70.0.3538.10',
'69.0.3497.85',
'70.0.3538.9',
'69.0.3497.84',
'71.0.3543.4',
'70.0.3538.8',
'71.0.3543.3',
'71.0.3543.2',
'71.0.3543.1',
'71.0.3543.0',
'70.0.3538.7',
'69.0.3497.83',
'71.0.3542.2',
'71.0.3542.1',
'71.0.3542.0',
'70.0.3538.6',
'69.0.3497.82',
'69.0.3497.81',
'71.0.3541.1',
'71.0.3541.0',
'70.0.3538.5',
'69.0.3497.80',
'71.0.3540.1',
'71.0.3540.0',
'70.0.3538.4',
'69.0.3497.79',
'70.0.3538.3',
'71.0.3539.1',
'71.0.3539.0',
'69.0.3497.78',
'68.0.3440.134',
'69.0.3497.77',
'70.0.3538.2',
'70.0.3538.1',
'70.0.3538.0',
'69.0.3497.76',
'68.0.3440.133',
'69.0.3497.75',
'70.0.3537.2',
'70.0.3537.1',
'70.0.3537.0',
'69.0.3497.74',
'68.0.3440.132',
'70.0.3536.0',
'70.0.3535.5',
'70.0.3535.4',
'70.0.3535.3',
'69.0.3497.73',
'68.0.3440.131',
'70.0.3532.8',
'70.0.3532.7',
'69.0.3497.72',
'69.0.3497.71',
'70.0.3535.2',
'70.0.3535.1',
'70.0.3535.0',
'69.0.3497.70',
'68.0.3440.130',
'69.0.3497.69',
'68.0.3440.129',
'70.0.3534.4',
'70.0.3534.3',
'70.0.3534.2',
'70.0.3534.1',
'70.0.3534.0',
'69.0.3497.68',
'68.0.3440.128',
'70.0.3533.2',
'70.0.3533.1',
'70.0.3533.0',
'69.0.3497.67',
'68.0.3440.127',
'70.0.3532.6',
'70.0.3532.5',
'70.0.3532.4',
'69.0.3497.66',
'68.0.3440.126',
'70.0.3532.3',
'70.0.3532.2',
'70.0.3532.1',
'69.0.3497.60',
'69.0.3497.65',
'69.0.3497.64',
'70.0.3532.0',
'70.0.3531.0',
'70.0.3530.4',
'70.0.3530.3',
'70.0.3530.2',
'69.0.3497.58',
'68.0.3440.125',
'69.0.3497.57',
'69.0.3497.56',
'69.0.3497.55',
'69.0.3497.54',
'70.0.3530.1',
'70.0.3530.0',
'69.0.3497.53',
'68.0.3440.124',
'69.0.3497.52',
'70.0.3529.3',
'70.0.3529.2',
'70.0.3529.1',
'70.0.3529.0',
'69.0.3497.51',
'70.0.3528.4',
'68.0.3440.123',
'70.0.3528.3',
'70.0.3528.2',
'70.0.3528.1',
'70.0.3528.0',
'69.0.3497.50',
'68.0.3440.122',
'70.0.3527.1',
'70.0.3527.0',
'69.0.3497.49',
'68.0.3440.121',
'70.0.3526.1',
'70.0.3526.0',
'68.0.3440.120',
'69.0.3497.48',
'69.0.3497.47',
'68.0.3440.119',
'68.0.3440.118',
'70.0.3525.5',
'70.0.3525.4',
'70.0.3525.3',
'68.0.3440.117',
'69.0.3497.46',
'70.0.3525.2',
'70.0.3525.1',
'70.0.3525.0',
'69.0.3497.45',
'68.0.3440.116',
'70.0.3524.4',
'70.0.3524.3',
'69.0.3497.44',
'70.0.3524.2',
'70.0.3524.1',
'70.0.3524.0',
'70.0.3523.2',
'69.0.3497.43',
'68.0.3440.115',
'70.0.3505.9',
'69.0.3497.42',
'70.0.3505.8',
'70.0.3523.1',
'70.0.3523.0',
'69.0.3497.41',
'68.0.3440.114',
'70.0.3505.7',
'69.0.3497.40',
'70.0.3522.1',
'70.0.3522.0',
'70.0.3521.2',
'69.0.3497.39',
'68.0.3440.113',
'70.0.3505.6',
'70.0.3521.1',
'70.0.3521.0',
'69.0.3497.38',
'68.0.3440.112',
'70.0.3520.1',
'70.0.3520.0',
'69.0.3497.37',
'68.0.3440.111',
'70.0.3519.3',
'70.0.3519.2',
'70.0.3519.1',
'70.0.3519.0',
'69.0.3497.36',
'68.0.3440.110',
'70.0.3518.1',
'70.0.3518.0',
'69.0.3497.35',
'69.0.3497.34',
'68.0.3440.109',
'70.0.3517.1',
'70.0.3517.0',
'69.0.3497.33',
'68.0.3440.108',
'69.0.3497.32',
'70.0.3516.3',
'70.0.3516.2',
'70.0.3516.1',
'70.0.3516.0',
'69.0.3497.31',
'68.0.3440.107',
'70.0.3515.4',
'68.0.3440.106',
'70.0.3515.3',
'70.0.3515.2',
'70.0.3515.1',
'70.0.3515.0',
'69.0.3497.30',
'68.0.3440.105',
'68.0.3440.104',
'70.0.3514.2',
'70.0.3514.1',
'70.0.3514.0',
'69.0.3497.29',
'68.0.3440.103',
'70.0.3513.1',
'70.0.3513.0',
'69.0.3497.28',
)
return _USER_AGENT_TPL % random.choice(_CHROME_VERSIONS)
std_headers = {
'User-Agent': random_user_agent(),
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-us,en;q=0.5',
}
USER_AGENTS = {
'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}
NO_DEFAULT = object()
ENGLISH_MONTH_NAMES = [
'January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November', 'December']
MONTH_NAMES = {
'en': ENGLISH_MONTH_NAMES,
'fr': [
'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
}
KNOWN_EXTENSIONS = (
'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
'flv', 'f4v', 'f4a', 'f4b',
'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
'mkv', 'mka', 'mk3d',
'avi', 'divx',
'mov',
'asf', 'wmv', 'wma',
'3gp', '3g2',
'mp3',
'flac',
'ape',
'wav',
'f4f', 'f4m', 'm3u8', 'smil')
# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))
DATE_FORMATS = (
'%d %B %Y',
'%d %b %Y',
'%B %d %Y',
'%B %dst %Y',
'%B %dnd %Y',
'%B %drd %Y',
'%B %dth %Y',
'%b %d %Y',
'%b %dst %Y',
'%b %dnd %Y',
'%b %drd %Y',
'%b %dth %Y',
'%b %dst %Y %I:%M',
'%b %dnd %Y %I:%M',
'%b %drd %Y %I:%M',
'%b %dth %Y %I:%M',
'%Y %m %d',
'%Y-%m-%d',
'%Y/%m/%d',
'%Y/%m/%d %H:%M',
'%Y/%m/%d %H:%M:%S',
'%Y-%m-%d %H:%M',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
'%d.%m.%Y %H:%M',
'%d.%m.%Y %H.%M',
'%Y-%m-%dT%H:%M:%SZ',
'%Y-%m-%dT%H:%M:%S.%fZ',
'%Y-%m-%dT%H:%M:%S.%f0Z',
'%Y-%m-%dT%H:%M:%S',
'%Y-%m-%dT%H:%M:%S.%f',
'%Y-%m-%dT%H:%M',
'%b %d %Y at %H:%M',
'%b %d %Y at %H:%M:%S',
'%B %d %Y at %H:%M',
'%B %d %Y at %H:%M:%S',
)
DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
'%d-%m-%Y',
'%d.%m.%Y',
'%d.%m.%y',
'%d/%m/%Y',
'%d/%m/%y',
'%d/%m/%Y %H:%M:%S',
])
DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
'%m-%d-%Y',
'%m.%d.%Y',
'%m/%d/%Y',
'%m/%d/%y',
'%m/%d/%Y %H:%M:%S',
])
PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>'
def preferredencoding():
"""Get preferred encoding.
Returns the best encoding scheme for the system, based on
locale.getpreferredencoding() and some further tweaks.
"""
try:
pref = locale.getpreferredencoding()
'TEST'.encode(pref)
except Exception:
pref = 'UTF-8'
return pref
def write_json_file(obj, fn):
""" Encode obj as JSON and write it to fn, atomically if possible """
fn = encodeFilename(fn)
if sys.version_info < (3, 0) and sys.platform != 'win32':
encoding = get_filesystem_encoding()
# os.path.basename returns a bytes object, but NamedTemporaryFile
# will fail if the filename contains non ascii characters unless we
# use a unicode object
path_basename = lambda f: os.path.basename(fn).decode(encoding)
# the same for os.path.dirname
path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
else:
path_basename = os.path.basename
path_dirname = os.path.dirname
args = {
'suffix': '.tmp',
'prefix': path_basename(fn) + '.',
'dir': path_dirname(fn),
'delete': False,
}
# In Python 2.x, json.dump expects a bytestream.
# In Python 3.x, it writes to a character stream
if sys.version_info < (3, 0):
args['mode'] = 'wb'
else:
args.update({
'mode': 'w',
'encoding': 'utf-8',
})
tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))
try:
with tf:
json.dump(obj, tf)
if sys.platform == 'win32':
# Need to remove existing file on Windows, else os.rename raises
# WindowsError or FileExistsError.
try:
os.unlink(fn)
except OSError:
pass
try:
mask = os.umask(0)
os.umask(mask)
os.chmod(tf.name, 0o666 & ~mask)
except OSError:
pass
os.rename(tf.name, fn)
except Exception:
try:
os.remove(tf.name)
except OSError:
pass
raise
if sys.version_info >= (2, 7):
def find_xpath_attr(node, xpath, key, val=None):
""" Find the xpath xpath[@key=val] """
assert re.match(r'^[a-zA-Z_-]+$', key)
expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
return node.find(expr)
else:
def find_xpath_attr(node, xpath, key, val=None):
for f in node.findall(compat_xpath(xpath)):
if key not in f.attrib:
continue
if val is None or f.attrib.get(key) == val:
return f
return None
# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter
def xpath_with_ns(path, ns_map):
components = [c.split(':') for c in path.split('/')]
replaced = []
for c in components:
if len(c) == 1:
replaced.append(c[0])
else:
ns, tag = c
replaced.append('{%s}%s' % (ns_map[ns], tag))
return '/'.join(replaced)
def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
def _find_xpath(xpath):
return node.find(compat_xpath(xpath))
if isinstance(xpath, (str, compat_str)):
n = _find_xpath(xpath)
else:
for xp in xpath:
n = _find_xpath(xp)
if n is not None:
break
if n is None:
if default is not NO_DEFAULT:
return default
elif fatal:
name = xpath if name is None else name
raise ExtractorError('Could not find XML element %s' % name)
else:
return None
return n
def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
n = xpath_element(node, xpath, name, fatal=fatal, default=default)
if n is None or n == default:
return n
if n.text is None:
if default is not NO_DEFAULT:
return default
elif fatal:
name = xpath if name is None else name
raise ExtractorError('Could not find XML element\'s text %s' % name)
else:
return None
return n.text
def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
n = find_xpath_attr(node, xpath, key)
if n is None:
if default is not NO_DEFAULT:
return default
elif fatal:
name = '%s[@%s]' % (xpath, key) if name is None else name
raise ExtractorError('Could not find XML attribute %s' % name)
else:
return None
return n.attrib[key]
def get_element_by_id(id, html):
"""Return the content of the tag with the specified ID in the passed HTML document"""
return get_element_by_attribute('id', id, html)
def get_element_by_class(class_name, html):
"""Return the content of the first tag with the specified class in the passed HTML document"""
retval = get_elements_by_class(class_name, html)
return retval[0] if retval else None
def get_element_by_attribute(attribute, value, html, escape_value=True):
retval = get_elements_by_attribute(attribute, value, html, escape_value)
return retval[0] if retval else None
def get_elements_by_class(class_name, html):
"""Return the content of all tags with the specified class in the passed HTML document as a list"""
return get_elements_by_attribute(
'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
html, escape_value=False)
def get_elements_by_attribute(attribute, value, html, escape_value=True):
"""Return the content of the tag with the specified attribute in the passed HTML document"""
value = re.escape(value) if escape_value else value
retlist = []
for m in re.finditer(r'''(?xs)
<([a-zA-Z0-9:._-]+)
(?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
\s+%s=['"]?%s['"]?
(?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
\s*>
(?P<content>.*?)
</\1>
''' % (re.escape(attribute), value), html):
res = m.group('content')
if res.startswith('"') or res.startswith("'"):
res = res[1:-1]
retlist.append(unescapeHTML(res))
return retlist
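# A quick usage sketch for the HTML lookup helpers above (markup invented for
# illustration; outputs assume the helpers behave as defined in this module):
#   >>> get_element_by_class('title', '<span class="title">Hello</span>')
#   'Hello'
#   >>> get_element_by_attribute('id', 'main', '<div id="main">body</div>')
#   'body'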
class HTMLAttributeParser(compat_HTMLParser):
"""Trivial HTML parser to gather the attributes for a single element"""
def __init__(self):
self.attrs = {}
compat_HTMLParser.__init__(self)
def handle_starttag(self, tag, attrs):
self.attrs = dict(attrs)
def extract_attributes(html_element):
"""Given a string for an HTML element such as
<el
a="foo" B="bar" c="&98;az" d=boz
empty= noval entity="&"
sq='"' dq="'"
>
Decode and return a dictionary of attributes.
{
'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
'empty': '', 'noval': None, 'entity': '&',
'sq': '"', 'dq': '\''
}.
NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
"""
parser = HTMLAttributeParser()
try:
parser.feed(html_element)
parser.close()
# Older Python may throw HTMLParseError in case of malformed HTML
except compat_HTMLParseError:
pass
return parser.attrs
def clean_html(html):
"""Clean an HTML snippet into a readable string"""
if html is None: # Convenience for sanitizing descriptions etc.
return html
# Newline vs <br />
html = html.replace('\n', ' ')
html = re.sub(r'(?u)\s*<\s*br\s*/?\s*>\s*', '\n', html)
html = re.sub(r'(?u)<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
# Strip html tags
html = re.sub('<.*?>', '', html)
# Replace html entities
html = unescapeHTML(html)
return html.strip()
def sanitize_open(filename, open_mode):
"""Try to open the given filename, and slightly tweak it if this fails.
Attempts to open the given filename. If this fails, it tries to change
the filename slightly, step by step, until it's either able to open it
or it fails and raises a final exception, like the standard open()
function.
It returns the tuple (stream, definitive_file_name).
"""
try:
if filename == '-':
if sys.platform == 'win32':
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
stream = open(encodeFilename(filename), open_mode)
return (stream, filename)
except (IOError, OSError) as err:
if err.errno in (errno.EACCES,):
raise
# In case of error, try to remove win32 forbidden chars
alt_filename = sanitize_path(filename)
if alt_filename == filename:
raise
else:
# An exception here should be caught in the caller
stream = open(encodeFilename(alt_filename), open_mode)
return (stream, alt_filename)
def timeconvert(timestr):
"""Convert RFC 2822 defined time string into system timestamp"""
timestamp = None
timetuple = email.utils.parsedate_tz(timestr)
if timetuple is not None:
timestamp = email.utils.mktime_tz(timetuple)
return timestamp
def sanitize_filename(s, restricted=False, is_id=False):
"""Sanitizes a string so it could be used as part of a filename.
If restricted is set, use a stricter subset of allowed characters.
Set is_id if this is not an arbitrary string, but an ID that should be kept
if possible.
"""
def replace_insane(char):
if restricted and char in ACCENT_CHARS:
return ACCENT_CHARS[char]
if char == '?' or ord(char) < 32 or ord(char) == 127:
return ''
elif char == '"':
return '' if restricted else '\''
elif char == ':':
return '_-' if restricted else ' -'
elif char in '\\/|*<>':
return '_'
if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
return '_'
if restricted and ord(char) > 127:
return '_'
return char
# Handle timestamps
s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
result = ''.join(map(replace_insane, s))
if not is_id:
while '__' in result:
result = result.replace('__', '_')
result = result.strip('_')
# Common case of "Foreign band name - English song title"
if restricted and result.startswith('-_'):
result = result[2:]
if result.startswith('-'):
result = '_' + result[len('-'):]
result = result.lstrip('.')
if not result:
result = '_'
return result
def sanitize_path(s):
"""Sanitizes and normalizes path on Windows"""
if sys.platform != 'win32':
return s
drive_or_unc, _ = os.path.splitdrive(s)
if sys.version_info < (2, 7) and not drive_or_unc:
drive_or_unc, _ = os.path.splitunc(s)
norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
if drive_or_unc:
norm_path.pop(0)
sanitized_path = [
path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
for path_part in norm_path]
if drive_or_unc:
sanitized_path.insert(0, drive_or_unc + os.path.sep)
return os.path.join(*sanitized_path)
def sanitize_url(url):
# Prepend protocol-less URLs with `http:` scheme in order to mitigate
# the number of unwanted failures due to missing protocol
if url.startswith('//'):
return 'http:%s' % url
# Fix some common typos seen so far
COMMON_TYPOS = (
# https://github.com/ytdl-org/youtube-dl/issues/15649
(r'^httpss://', r'https://'),
# https://bx1.be/lives/direct-tv/
(r'^rmtp([es]?)://', r'rtmp\1://'),
)
for mistake, fixup in COMMON_TYPOS:
if re.match(mistake, url):
return re.sub(mistake, fixup, url)
return url
def sanitized_Request(url, *args, **kwargs):
return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs)
def expand_path(s):
"""Expand shell variables and ~"""
return os.path.expandvars(compat_expanduser(s))
def orderedSet(iterable):
""" Remove all duplicates from the input iterable """
res = []
for el in iterable:
if el not in res:
res.append(el)
return res
def _htmlentity_transform(entity_with_semicolon):
"""Transforms an HTML entity to a character."""
entity = entity_with_semicolon[:-1]
# Known non-numeric HTML entity
if entity in compat_html_entities.name2codepoint:
return compat_chr(compat_html_entities.name2codepoint[entity])
# TODO: HTML5 allows entities without a semicolon. For example,
# 'Éric' should be decoded as 'Éric'.
if entity_with_semicolon in compat_html_entities_html5:
return compat_html_entities_html5[entity_with_semicolon]
mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
if mobj is not None:
numstr = mobj.group(1)
if numstr.startswith('x'):
base = 16
numstr = '0%s' % numstr
else:
base = 10
# See https://github.com/ytdl-org/youtube-dl/issues/7518
try:
return compat_chr(int(numstr, base))
except ValueError:
pass
# Unknown entity in name, return its literal representation
return '&%s;' % entity
def unescapeHTML(s):
if s is None:
return None
assert type(s) == compat_str
return re.sub(
r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)
def get_subprocess_encoding():
if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
# For subprocess calls, encode with locale encoding
# Refer to http://stackoverflow.com/a/9951851/35070
encoding = preferredencoding()
else:
encoding = sys.getfilesystemencoding()
if encoding is None:
encoding = 'utf-8'
return encoding
def encodeFilename(s, for_subprocess=False):
"""
@param s The name of the file
"""
assert type(s) == compat_str
# Python 3 has a Unicode API
if sys.version_info >= (3, 0):
return s
# Pass '' directly to use Unicode APIs on Windows 2000 and up
# (Detecting Windows NT 4 is tricky because 'major >= 4' would
# match Windows 9x series as well. Besides, NT 4 is obsolete.)
if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
return s
# Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
if sys.platform.startswith('java'):
return s
return s.encode(get_subprocess_encoding(), 'ignore')
def decodeFilename(b, for_subprocess=False):
if sys.version_info >= (3, 0):
return b
if not isinstance(b, bytes):
return b
return b.decode(get_subprocess_encoding(), 'ignore')
def encodeArgument(s):
if not isinstance(s, compat_str):
# Legacy code that uses byte strings
# Uncomment the following line after fixing all post processors
# assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
s = s.decode('ascii')
return encodeFilename(s, True)
def decodeArgument(b):
return decodeFilename(b, True)
def decodeOption(optval):
if optval is None:
return optval
if isinstance(optval, bytes):
optval = optval.decode(preferredencoding())
assert isinstance(optval, compat_str)
return optval
def formatSeconds(secs):
if secs > 3600:
return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
elif secs > 60:
return '%d:%02d' % (secs // 60, secs % 60)
else:
return '%d' % secs
def make_HTTPS_handler(params, **kwargs):
opts_no_check_certificate = params.get('nocheckcertificate', False)
if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9
context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
if opts_no_check_certificate:
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
try:
return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
except TypeError:
# Python 2.7.8
# (create_default_context present but HTTPSHandler has no context=)
pass
if sys.version_info < (3, 2):
return YoutubeDLHTTPSHandler(params, **kwargs)
else: # Python < 3.4
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = (ssl.CERT_NONE
if opts_no_check_certificate
else ssl.CERT_REQUIRED)
context.set_default_verify_paths()
return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
def bug_reports_message():
if ytdl_is_updateable():
update_cmd = 'type youtube-dl -U to update'
else:
update_cmd = 'see https://yt-dl.org/update on how to update'
msg = '; please report this issue on https://yt-dl.org/bug .'
msg += ' Make sure you are using the latest version; %s.' % update_cmd
msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.'
return msg
class YoutubeDLError(Exception):
"""Base exception for YoutubeDL errors."""
pass
class ExtractorError(YoutubeDLError):
"""Error during info extraction."""
def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
""" tb, if given, is the original traceback (so that it can be printed out).
If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
"""
if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
expected = True
if video_id is not None:
msg = video_id + ': ' + msg
if cause:
msg += ' (caused by %r)' % cause
if not expected:
msg += bug_reports_message()
super(ExtractorError, self).__init__(msg)
self.traceback = tb
self.exc_info = sys.exc_info() # preserve original exception
self.cause = cause
self.video_id = video_id
def format_traceback(self):
if self.traceback is None:
return None
return ''.join(traceback.format_tb(self.traceback))
class UnsupportedError(ExtractorError):
def __init__(self, url):
super(UnsupportedError, self).__init__(
'Unsupported URL: %s' % url, expected=True)
self.url = url
class RegexNotFoundError(ExtractorError):
"""Error when a regex didn't match"""
pass
class GeoRestrictedError(ExtractorError):
"""Geographic restriction Error exception.
This exception may be thrown when a video is not available from your
geographic location due to geographic restrictions imposed by a website.
"""
def __init__(self, msg, countries=None):
super(GeoRestrictedError, self).__init__(msg, expected=True)
self.msg = msg
self.countries = countries
class DownloadError(YoutubeDLError):
"""Download Error exception.
This exception may be thrown by FileDownloader objects if they are not
configured to continue on errors. They will contain the appropriate
error message.
"""
def __init__(self, msg, exc_info=None):
""" exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
super(DownloadError, self).__init__(msg)
self.exc_info = exc_info
class SameFileError(YoutubeDLError):
"""Same File exception.
This exception will be thrown by FileDownloader objects if they detect
multiple files would have to be downloaded to the same file on disk.
"""
pass
class PostProcessingError(YoutubeDLError):
"""Post Processing exception.
This exception may be raised by PostProcessor's .run() method to
indicate an error in the postprocessing task.
"""
def __init__(self, msg):
super(PostProcessingError, self).__init__(msg)
self.msg = msg
class MaxDownloadsReached(YoutubeDLError):
""" --max-downloads limit has been reached. """
pass
class UnavailableVideoError(YoutubeDLError):
"""Unavailable Format exception.
This exception will be thrown when a video is requested
in a format that is not available for that video.
"""
pass
class ContentTooShortError(YoutubeDLError):
"""Content Too Short exception.
This exception may be raised by FileDownloader objects when a file they
download is too small for what the server announced first, indicating
the connection was probably interrupted.
"""
def __init__(self, downloaded, expected):
super(ContentTooShortError, self).__init__(
'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected)
)
# Both in bytes
self.downloaded = downloaded
self.expected = expected
class XAttrMetadataError(YoutubeDLError):
def __init__(self, code=None, msg='Unknown error'):
super(XAttrMetadataError, self).__init__(msg)
self.code = code
self.msg = msg
# Parsing code and msg
if (self.code in (errno.ENOSPC, errno.EDQUOT)
or 'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
self.reason = 'NO_SPACE'
elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
self.reason = 'VALUE_TOO_LONG'
else:
self.reason = 'NOT_SUPPORTED'
class XAttrUnavailableError(YoutubeDLError):
pass
def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
# Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
# expected HTTP responses to meet HTTP/1.0 or later (see also
# https://github.com/ytdl-org/youtube-dl/issues/6727)
if sys.version_info < (3, 0):
kwargs['strict'] = True
hc = http_class(*args, **compat_kwargs(kwargs))
source_address = ydl_handler._params.get('source_address')
if source_address is not None:
# This is to workaround _create_connection() from socket where it will try all
# address data from getaddrinfo() including IPv6. This filters the result from
# getaddrinfo() based on the source_address value.
# This is based on the cpython socket.create_connection() function.
# https://github.com/python/cpython/blob/master/Lib/socket.py#L691
def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
host, port = address
err = None
addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
af = socket.AF_INET if '.' in source_address[0] else socket.AF_INET6
ip_addrs = [addr for addr in addrs if addr[0] == af]
if addrs and not ip_addrs:
ip_version = 'v4' if af == socket.AF_INET else 'v6'
raise socket.error(
"No remote IP%s addresses available for connect, can't use '%s' as source address"
% (ip_version, source_address[0]))
for res in ip_addrs:
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
sock.bind(source_address)
sock.connect(sa)
err = None # Explicitly break reference cycle
return sock
except socket.error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise socket.error('getaddrinfo returns an empty list')
if hasattr(hc, '_create_connection'):
hc._create_connection = _create_connection
sa = (source_address, 0)
if hasattr(hc, 'source_address'): # Python 2.7+
hc.source_address = sa
else: # Python 2.6
def _hc_connect(self, *args, **kwargs):
sock = _create_connection(
(self.host, self.port), self.timeout, sa)
if is_https:
self.sock = ssl.wrap_socket(
sock, self.key_file, self.cert_file,
ssl_version=ssl.PROTOCOL_TLSv1)
else:
self.sock = sock
hc.connect = functools.partial(_hc_connect, hc)
return hc
def handle_youtubedl_headers(headers):
filtered_headers = headers
if 'Youtubedl-no-compression' in filtered_headers:
filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding')
del filtered_headers['Youtubedl-no-compression']
return filtered_headers
class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
"""Handler for HTTP requests and responses.
This class, when installed with an OpenerDirector, automatically adds
the standard headers to every HTTP request and handles gzipped and
deflated responses from web servers. If compression is to be avoided in
a particular request, the original request in the program code only has
to include the HTTP header "Youtubedl-no-compression", which will be
removed before making the real request.
Part of this code was copied from:
http://techknack.net/python-urllib2-handlers/
Andrew Rowls, the author of that code, agreed to release it to the
public domain.
"""
def __init__(self, params, *args, **kwargs):
compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
self._params = params
def http_open(self, req):
conn_class = compat_http_client.HTTPConnection
socks_proxy = req.headers.get('Ytdl-socks-proxy')
if socks_proxy:
conn_class = make_socks_conn_class(conn_class, socks_proxy)
del req.headers['Ytdl-socks-proxy']
return self.do_open(functools.partial(
_create_http_connection, self, conn_class, False),
req)
@staticmethod
def deflate(data):
try:
return zlib.decompress(data, -zlib.MAX_WBITS)
except zlib.error:
return zlib.decompress(data)
def http_request(self, req):
        # According to RFC 3986, URLs cannot contain non-ASCII characters; however, this is not
        # always respected by websites - some tend to give out URLs with non-percent-encoded
        # non-ASCII characters (see telemb.py, ard.py [#3412])
# urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
# To work around aforementioned issue we will replace request's original URL with
# percent-encoded one
# Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
# the code of this workaround has been moved here from YoutubeDL.urlopen()
url = req.get_full_url()
url_escaped = escape_url(url)
# Substitute URL if any change after escaping
if url != url_escaped:
req = update_Request(req, url=url_escaped)
for h, v in std_headers.items():
# Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
# The dict keys are capitalized because of this bug by urllib
if h.capitalize() not in req.headers:
req.add_header(h, v)
req.headers = handle_youtubedl_headers(req.headers)
if sys.version_info < (2, 7) and '#' in req.get_full_url():
# Python 2.6 is brain-dead when it comes to fragments
req._Request__original = req._Request__original.partition('#')[0]
req._Request__r_type = req._Request__r_type.partition('#')[0]
return req
def http_response(self, req, resp):
old_resp = resp
# gzip
if resp.headers.get('Content-encoding', '') == 'gzip':
content = resp.read()
gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
try:
uncompressed = io.BytesIO(gz.read())
except IOError as original_ioerror:
                # There may be junk at the end of the file
# See http://stackoverflow.com/q/4928560/35070 for details
for i in range(1, 1024):
try:
gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
uncompressed = io.BytesIO(gz.read())
except IOError:
continue
break
else:
raise original_ioerror
resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
resp.msg = old_resp.msg
del resp.headers['Content-encoding']
# deflate
if resp.headers.get('Content-encoding', '') == 'deflate':
gz = io.BytesIO(self.deflate(resp.read()))
resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
resp.msg = old_resp.msg
del resp.headers['Content-encoding']
# Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
# https://github.com/ytdl-org/youtube-dl/issues/6457).
if 300 <= resp.code < 400:
location = resp.headers.get('Location')
if location:
                # As per RFC 2616 the default charset is iso-8859-1, which is respected by Python 3
if sys.version_info >= (3, 0):
location = location.encode('iso-8859-1').decode('utf-8')
else:
location = location.decode('utf-8')
location_escaped = escape_url(location)
if location != location_escaped:
del resp.headers['Location']
if sys.version_info < (3, 0):
location_escaped = location_escaped.encode('utf-8')
resp.headers['Location'] = location_escaped
return resp
https_request = http_request
https_response = http_response
def make_socks_conn_class(base_class, socks_proxy):
assert issubclass(base_class, (
compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))
url_components = compat_urlparse.urlparse(socks_proxy)
if url_components.scheme.lower() == 'socks5':
socks_type = ProxyType.SOCKS5
elif url_components.scheme.lower() in ('socks', 'socks4'):
socks_type = ProxyType.SOCKS4
elif url_components.scheme.lower() == 'socks4a':
socks_type = ProxyType.SOCKS4A
def unquote_if_non_empty(s):
if not s:
return s
return compat_urllib_parse_unquote_plus(s)
proxy_args = (
socks_type,
url_components.hostname, url_components.port or 1080,
True, # Remote DNS
unquote_if_non_empty(url_components.username),
unquote_if_non_empty(url_components.password),
)
class SocksConnection(base_class):
def connect(self):
self.sock = sockssocket()
self.sock.setproxy(*proxy_args)
if type(self.timeout) in (int, float):
self.sock.settimeout(self.timeout)
self.sock.connect((self.host, self.port))
if isinstance(self, compat_http_client.HTTPSConnection):
if hasattr(self, '_context'): # Python > 2.6
self.sock = self._context.wrap_socket(
self.sock, server_hostname=self.host)
else:
self.sock = ssl.wrap_socket(self.sock)
return SocksConnection
class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
def __init__(self, params, https_conn_class=None, *args, **kwargs):
compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
self._params = params
def https_open(self, req):
kwargs = {}
conn_class = self._https_conn_class
if hasattr(self, '_context'): # python > 2.6
kwargs['context'] = self._context
if hasattr(self, '_check_hostname'): # python 3.x
kwargs['check_hostname'] = self._check_hostname
socks_proxy = req.headers.get('Ytdl-socks-proxy')
if socks_proxy:
conn_class = make_socks_conn_class(conn_class, socks_proxy)
del req.headers['Ytdl-socks-proxy']
return self.do_open(functools.partial(
_create_http_connection, self, conn_class, True),
req, **kwargs)
class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar):
"""
See [1] for cookie file format.
1. https://curl.haxx.se/docs/http-cookies.html
"""
_HTTPONLY_PREFIX = '#HttpOnly_'
_ENTRY_LEN = 7
_HEADER = '''# Netscape HTTP Cookie File
# This file is generated by youtube-dl. Do not edit.
'''
_CookieFileEntry = collections.namedtuple(
'CookieFileEntry',
('domain_name', 'include_subdomains', 'path', 'https_only', 'expires_at', 'name', 'value'))
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
"""
Save cookies to a file.
Most of the code is taken from CPython 3.8 and slightly adapted
to support cookie files with UTF-8 in both python 2 and 3.
"""
if filename is None:
if self.filename is not None:
filename = self.filename
else:
raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)
# Store session cookies with `expires` set to 0 instead of an empty
# string
for cookie in self:
if cookie.expires is None:
cookie.expires = 0
with io.open(filename, 'w', encoding='utf-8') as f:
f.write(self._HEADER)
now = time.time()
for cookie in self:
if not ignore_discard and cookie.discard:
continue
if not ignore_expires and cookie.is_expired(now):
continue
if cookie.secure:
secure = 'TRUE'
else:
secure = 'FALSE'
if cookie.domain.startswith('.'):
initial_dot = 'TRUE'
else:
initial_dot = 'FALSE'
if cookie.expires is not None:
expires = compat_str(cookie.expires)
else:
expires = ''
if cookie.value is None:
# cookies.txt regards 'Set-Cookie: foo' as a cookie
# with no name, whereas http.cookiejar regards it as a
# cookie with no value.
name = ''
value = cookie.name
else:
name = cookie.name
value = cookie.value
f.write(
'\t'.join([cookie.domain, initial_dot, cookie.path,
secure, expires, name, value]) + '\n')
def load(self, filename=None, ignore_discard=False, ignore_expires=False):
"""Load cookies from a file."""
if filename is None:
if self.filename is not None:
filename = self.filename
else:
raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)
def prepare_line(line):
if line.startswith(self._HTTPONLY_PREFIX):
line = line[len(self._HTTPONLY_PREFIX):]
# comments and empty lines are fine
if line.startswith('#') or not line.strip():
return line
cookie_list = line.split('\t')
if len(cookie_list) != self._ENTRY_LEN:
raise compat_cookiejar.LoadError('invalid length %d' % len(cookie_list))
cookie = self._CookieFileEntry(*cookie_list)
if cookie.expires_at and not cookie.expires_at.isdigit():
raise compat_cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
return line
cf = io.StringIO()
with io.open(filename, encoding='utf-8') as f:
for line in f:
try:
cf.write(prepare_line(line))
except compat_cookiejar.LoadError as e:
write_string(
'WARNING: skipping cookie file entry due to %s: %r\n'
% (e, line), sys.stderr)
continue
cf.seek(0)
self._really_load(cf, filename, ignore_discard, ignore_expires)
# Session cookies are denoted by either `expires` field set to
# an empty string or 0. MozillaCookieJar only recognizes the former
        # (see [1]). So we need to force the latter to be recognized as session
# cookies on our own.
# Session cookies may be important for cookies-based authentication,
# e.g. usually, when user does not check 'Remember me' check box while
# logging in on a site, some important cookies are stored as session
# cookies so that not recognizing them will result in failed login.
# 1. https://bugs.python.org/issue17164
for cookie in self:
# Treat `expires=0` cookies as session cookies
if cookie.expires == 0:
cookie.expires = None
cookie.discard = True
class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
def __init__(self, cookiejar=None):
compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)
def http_response(self, request, response):
# Python 2 will choke on next HTTP request in row if there are non-ASCII
# characters in Set-Cookie HTTP header of last response (see
# https://github.com/ytdl-org/youtube-dl/issues/6769).
# In order to at least prevent crashing we will percent encode Set-Cookie
# header before HTTPCookieProcessor starts processing it.
# if sys.version_info < (3, 0) and response.headers:
# for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
# set_cookie = response.headers.get(set_cookie_header)
# if set_cookie:
# set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
# if set_cookie != set_cookie_escaped:
# del response.headers[set_cookie_header]
# response.headers[set_cookie_header] = set_cookie_escaped
return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)
https_request = compat_urllib_request.HTTPCookieProcessor.http_request
https_response = http_response
class YoutubeDLRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
if sys.version_info[0] < 3:
def redirect_request(self, req, fp, code, msg, headers, newurl):
# On python 2 urlh.geturl() may sometimes return redirect URL
# as byte string instead of unicode. This workaround allows
# to force it always return unicode.
return compat_urllib_request.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, headers, compat_str(newurl))
def extract_timezone(date_str):
m = re.search(
r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
date_str)
if not m:
timezone = datetime.timedelta()
else:
date_str = date_str[:-len(m.group('tz'))]
if not m.group('sign'):
timezone = datetime.timedelta()
else:
sign = 1 if m.group('sign') == '+' else -1
timezone = datetime.timedelta(
hours=sign * int(m.group('hours')),
minutes=sign * int(m.group('minutes')))
return timezone, date_str
def parse_iso8601(date_str, delimiter='T', timezone=None):
""" Return a UNIX timestamp from the given date """
if date_str is None:
return None
date_str = re.sub(r'\.[0-9]+', '', date_str)
if timezone is None:
timezone, date_str = extract_timezone(date_str)
try:
date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
dt = datetime.datetime.strptime(date_str, date_format) - timezone
return calendar.timegm(dt.timetuple())
except ValueError:
pass
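# Illustrative usage for parse_iso8601 (a sketch of expected values given the
# timezone handling above; all variants below denote the same UTC instant):
#   parse_iso8601('2014-03-23T22:04:26Z')      # -> 1395612266
#   parse_iso8601('2014-03-23T23:04:26+0100')  # -> 1395612266
#   parse_iso8601(None)                        # -> None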
def date_formats(day_first=True):
return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST
def unified_strdate(date_str, day_first=True):
"""Return a string with the date in the format YYYYMMDD"""
if date_str is None:
return None
upload_date = None
# Replace commas
date_str = date_str.replace(',', ' ')
# Remove AM/PM + timezone
date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
_, date_str = extract_timezone(date_str)
for expression in date_formats(day_first):
try:
upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
except ValueError:
pass
if upload_date is None:
timetuple = email.utils.parsedate_tz(date_str)
if timetuple:
try:
upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
except ValueError:
pass
if upload_date is not None:
return compat_str(upload_date)
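# Illustrative usage for unified_strdate (sketch; with the default day_first=True
# ambiguous numeric dates are read day-first):
#   unified_strdate('December 21, 2010')  # -> '20101221'
#   unified_strdate('8/7/2009')           # -> '20090708'
#   unified_strdate('garbage')            # -> None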
def unified_timestamp(date_str, day_first=True):
if date_str is None:
return None
date_str = re.sub(r'[,|]', '', date_str)
pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
timezone, date_str = extract_timezone(date_str)
# Remove AM/PM + timezone
date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
# Remove unrecognized timezones from ISO 8601 alike timestamps
m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
if m:
date_str = date_str[:-len(m.group('tz'))]
# Python only supports microseconds, so remove nanoseconds
m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
if m:
date_str = m.group(1)
for expression in date_formats(day_first):
try:
dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
return calendar.timegm(dt.timetuple())
except ValueError:
pass
timetuple = email.utils.parsedate_tz(date_str)
if timetuple:
return calendar.timegm(timetuple) + pm_delta * 3600
def determine_ext(url, default_ext='unknown_video'):
if url is None or '.' not in url:
return default_ext
guess = url.partition('?')[0].rpartition('.')[2]
if re.match(r'^[A-Za-z0-9]+$', guess):
return guess
# Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
elif guess.rstrip('/') in KNOWN_EXTENSIONS:
return guess.rstrip('/')
else:
return default_ext
def subtitles_filename(filename, sub_lang, sub_format, expected_real_ext=None):
return replace_extension(filename, sub_lang + '.' + sub_format, expected_real_ext)
def date_from_str(date_str):
"""
    Return a date object from a string in the format YYYYMMDD,
    'now', 'today', 'yesterday', or (now|today)[+-][0-9]+(day|week|month|year)(s)?"""
today = datetime.date.today()
if date_str in ('now', 'today'):
return today
if date_str == 'yesterday':
return today - datetime.timedelta(days=1)
match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
if match is not None:
sign = match.group('sign')
time = int(match.group('time'))
if sign == '-':
time = -time
unit = match.group('unit')
# A bad approximation?
if unit == 'month':
unit = 'day'
time *= 30
elif unit == 'year':
unit = 'day'
time *= 365
unit += 's'
delta = datetime.timedelta(**{unit: time})
return today + delta
return datetime.datetime.strptime(date_str, '%Y%m%d').date()
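# Illustrative usage for date_from_str (sketch of the relative-date syntax):
#   date_from_str('now')        # -> today's date
#   date_from_str('now-1week')  # -> the date seven days ago
#   date_from_str('20140323')   # -> datetime.date(2014, 3, 23)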
def hyphenate_date(date_str):
"""
Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
if match is not None:
return '-'.join(match.groups())
else:
return date_str
class DateRange(object):
"""Represents a time interval between two dates"""
def __init__(self, start=None, end=None):
"""start and end must be strings in the format accepted by date"""
if start is not None:
self.start = date_from_str(start)
else:
self.start = datetime.datetime.min.date()
if end is not None:
self.end = date_from_str(end)
else:
self.end = datetime.datetime.max.date()
if self.start > self.end:
raise ValueError('Date range: "%s" , the start date must be before the end date' % self)
@classmethod
def day(cls, day):
"""Returns a range that only contains the given day"""
return cls(day, day)
def __contains__(self, date):
"""Check if the date is in the range"""
if not isinstance(date, datetime.date):
date = date_from_str(date)
return self.start <= date <= self.end
def __str__(self):
return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
def platform_name():
""" Returns the platform name as a compat_str """
res = platform.platform()
if isinstance(res, bytes):
res = res.decode(preferredencoding())
assert isinstance(res, compat_str)
return res
def _windows_write_string(s, out):
""" Returns True if the string was written using special methods,
False if it has yet to be written out."""
# Adapted from http://stackoverflow.com/a/3259271/35070
import ctypes
import ctypes.wintypes
WIN_OUTPUT_IDS = {
1: -11,
2: -12,
}
try:
fileno = out.fileno()
except AttributeError:
# If the output stream doesn't have a fileno, it's virtual
return False
except io.UnsupportedOperation:
# Some strange Windows pseudo files?
return False
if fileno not in WIN_OUTPUT_IDS:
return False
GetStdHandle = compat_ctypes_WINFUNCTYPE(
ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
('GetStdHandle', ctypes.windll.kernel32))
h = GetStdHandle(WIN_OUTPUT_IDS[fileno])
WriteConsoleW = compat_ctypes_WINFUNCTYPE(
ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
ctypes.wintypes.LPVOID)(('WriteConsoleW', ctypes.windll.kernel32))
written = ctypes.wintypes.DWORD(0)
GetFileType = compat_ctypes_WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(('GetFileType', ctypes.windll.kernel32))
FILE_TYPE_CHAR = 0x0002
FILE_TYPE_REMOTE = 0x8000
GetConsoleMode = compat_ctypes_WINFUNCTYPE(
ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
ctypes.POINTER(ctypes.wintypes.DWORD))(
('GetConsoleMode', ctypes.windll.kernel32))
INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value
def not_a_console(handle):
if handle == INVALID_HANDLE_VALUE or handle is None:
return True
return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR
or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)
if not_a_console(h):
return False
def next_nonbmp_pos(s):
try:
return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
except StopIteration:
return len(s)
while s:
count = min(next_nonbmp_pos(s), 1024)
ret = WriteConsoleW(
h, s, count if count else 2, ctypes.byref(written), None)
if ret == 0:
raise OSError('Failed to write string')
if not count: # We just wrote a non-BMP character
assert written.value == 2
s = s[1:]
else:
assert written.value > 0
s = s[written.value:]
return True
def write_string(s, out=None, encoding=None):
if out is None:
out = sys.stderr
assert type(s) == compat_str
if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
if _windows_write_string(s, out):
return
if ('b' in getattr(out, 'mode', '')
or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr
byt = s.encode(encoding or preferredencoding(), 'ignore')
out.write(byt)
elif hasattr(out, 'buffer'):
enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
byt = s.encode(enc, 'ignore')
out.buffer.write(byt)
else:
out.write(s)
out.flush()
def bytes_to_intlist(bs):
if not bs:
return []
if isinstance(bs[0], int): # Python 3
return list(bs)
else:
return [ord(c) for c in bs]
def intlist_to_bytes(xs):
if not xs:
return b''
return compat_struct_pack('%dB' % len(xs), *xs)
# Cross-platform file locking
if sys.platform == 'win32':
import ctypes.wintypes
import msvcrt
class OVERLAPPED(ctypes.Structure):
_fields_ = [
('Internal', ctypes.wintypes.LPVOID),
('InternalHigh', ctypes.wintypes.LPVOID),
('Offset', ctypes.wintypes.DWORD),
('OffsetHigh', ctypes.wintypes.DWORD),
('hEvent', ctypes.wintypes.HANDLE),
]
kernel32 = ctypes.windll.kernel32
LockFileEx = kernel32.LockFileEx
LockFileEx.argtypes = [
ctypes.wintypes.HANDLE, # hFile
ctypes.wintypes.DWORD, # dwFlags
ctypes.wintypes.DWORD, # dwReserved
ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
ctypes.POINTER(OVERLAPPED) # Overlapped
]
LockFileEx.restype = ctypes.wintypes.BOOL
UnlockFileEx = kernel32.UnlockFileEx
UnlockFileEx.argtypes = [
ctypes.wintypes.HANDLE, # hFile
ctypes.wintypes.DWORD, # dwReserved
ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
ctypes.POINTER(OVERLAPPED) # Overlapped
]
UnlockFileEx.restype = ctypes.wintypes.BOOL
whole_low = 0xffffffff
whole_high = 0x7fffffff
def _lock_file(f, exclusive):
overlapped = OVERLAPPED()
overlapped.Offset = 0
overlapped.OffsetHigh = 0
overlapped.hEvent = 0
f._lock_file_overlapped_p = ctypes.pointer(overlapped)
handle = msvcrt.get_osfhandle(f.fileno())
if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
whole_low, whole_high, f._lock_file_overlapped_p):
raise OSError('Locking file failed: %r' % ctypes.FormatError())
def _unlock_file(f):
assert f._lock_file_overlapped_p
handle = msvcrt.get_osfhandle(f.fileno())
if not UnlockFileEx(handle, 0,
whole_low, whole_high, f._lock_file_overlapped_p):
raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
else:
    # Some platforms, such as Jython, are missing fcntl
try:
import fcntl
def _lock_file(f, exclusive):
fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
def _unlock_file(f):
fcntl.flock(f, fcntl.LOCK_UN)
except ImportError:
UNSUPPORTED_MSG = 'file locking is not supported on this platform'
def _lock_file(f, exclusive):
raise IOError(UNSUPPORTED_MSG)
def _unlock_file(f):
raise IOError(UNSUPPORTED_MSG)
class locked_file(object):
def __init__(self, filename, mode, encoding=None):
assert mode in ['r', 'a', 'w']
self.f = io.open(filename, mode, encoding=encoding)
self.mode = mode
def __enter__(self):
exclusive = self.mode != 'r'
try:
_lock_file(self.f, exclusive)
except IOError:
self.f.close()
raise
return self
def __exit__(self, etype, value, traceback):
try:
_unlock_file(self.f)
finally:
self.f.close()
def __iter__(self):
return iter(self.f)
def write(self, *args):
return self.f.write(*args)
def read(self, *args):
return self.f.read(*args)
def get_filesystem_encoding():
encoding = sys.getfilesystemencoding()
return encoding if encoding is not None else 'utf-8'
def shell_quote(args):
quoted_args = []
encoding = get_filesystem_encoding()
for a in args:
if isinstance(a, bytes):
# We may get a filename encoded with 'encodeFilename'
a = a.decode(encoding)
quoted_args.append(compat_shlex_quote(a))
return ' '.join(quoted_args)
def smuggle_url(url, data):
""" Pass additional data in a URL for internal use. """
url, idata = unsmuggle_url(url, {})
data.update(idata)
sdata = compat_urllib_parse_urlencode(
{'__youtubedl_smuggle': json.dumps(data)})
return url + '#' + sdata
def unsmuggle_url(smug_url, default=None):
if '#__youtubedl_smuggle' not in smug_url:
return smug_url, default
url, _, sdata = smug_url.rpartition('#')
jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
data = json.loads(jsond)
return url, data
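# Illustrative round trip for smuggle_url/unsmuggle_url (sketch; the smuggled data
# travels in the URL fragment and is only meant to be read back by unsmuggle_url):
#   url = smuggle_url('http://example.com/video', {'referer': 'http://example.com/'})
#   unsmuggle_url(url)                    # -> ('http://example.com/video', {'referer': 'http://example.com/'})
#   unsmuggle_url('http://example.com/')  # -> ('http://example.com/', None)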
def format_bytes(bytes):
if bytes is None:
return 'N/A'
if type(bytes) is str:
bytes = float(bytes)
if bytes == 0.0:
exponent = 0
else:
exponent = int(math.log(bytes, 1024.0))
suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
converted = float(bytes) / float(1024 ** exponent)
return '%.2f%s' % (converted, suffix)
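# Illustrative usage for format_bytes (sketch; binary, i.e. 1024-based, suffixes):
#   format_bytes(None)  # -> 'N/A'
#   format_bytes(1536)  # -> '1.50KiB'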
def lookup_unit_table(unit_table, s):
units_re = '|'.join(re.escape(u) for u in unit_table)
m = re.match(
r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
if not m:
return None
num_str = m.group('num').replace(',', '.')
mult = unit_table[m.group('unit')]
return int(float(num_str) * mult)
def parse_filesize(s):
if s is None:
return None
# The lower-case forms are of course incorrect and unofficial,
# but we support those too
_UNIT_TABLE = {
'B': 1,
'b': 1,
'bytes': 1,
'KiB': 1024,
'KB': 1000,
'kB': 1024,
'Kb': 1000,
'kb': 1000,
'kilobytes': 1000,
'kibibytes': 1024,
'MiB': 1024 ** 2,
'MB': 1000 ** 2,
'mB': 1024 ** 2,
'Mb': 1000 ** 2,
'mb': 1000 ** 2,
'megabytes': 1000 ** 2,
'mebibytes': 1024 ** 2,
'GiB': 1024 ** 3,
'GB': 1000 ** 3,
'gB': 1024 ** 3,
'Gb': 1000 ** 3,
'gb': 1000 ** 3,
'gigabytes': 1000 ** 3,
'gibibytes': 1024 ** 3,
'TiB': 1024 ** 4,
'TB': 1000 ** 4,
'tB': 1024 ** 4,
'Tb': 1000 ** 4,
'tb': 1000 ** 4,
'terabytes': 1000 ** 4,
'tebibytes': 1024 ** 4,
'PiB': 1024 ** 5,
'PB': 1000 ** 5,
'pB': 1024 ** 5,
'Pb': 1000 ** 5,
'pb': 1000 ** 5,
'petabytes': 1000 ** 5,
'pebibytes': 1024 ** 5,
'EiB': 1024 ** 6,
'EB': 1000 ** 6,
'eB': 1024 ** 6,
'Eb': 1000 ** 6,
'eb': 1000 ** 6,
'exabytes': 1000 ** 6,
'exbibytes': 1024 ** 6,
'ZiB': 1024 ** 7,
'ZB': 1000 ** 7,
'zB': 1024 ** 7,
'Zb': 1000 ** 7,
'zb': 1000 ** 7,
'zettabytes': 1000 ** 7,
'zebibytes': 1024 ** 7,
'YiB': 1024 ** 8,
'YB': 1000 ** 8,
'yB': 1024 ** 8,
'Yb': 1000 ** 8,
'yb': 1000 ** 8,
'yottabytes': 1000 ** 8,
'yobibytes': 1024 ** 8,
}
return lookup_unit_table(_UNIT_TABLE, s)
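# Illustrative usage for parse_filesize (sketch; SI and binary units map to the
# table above, so similar-looking strings can parse to different byte counts):
#   parse_filesize('2 MiB')  # -> 2097152
#   parse_filesize('5 GB')   # -> 5000000000
#   parse_filesize('91 B')   # -> 91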
def parse_count(s):
if s is None:
return None
s = s.strip()
if re.match(r'^[\d,.]+$', s):
return str_to_int(s)
_UNIT_TABLE = {
'k': 1000,
'K': 1000,
'm': 1000 ** 2,
'M': 1000 ** 2,
'kk': 1000 ** 2,
'KK': 1000 ** 2,
}
return lookup_unit_table(_UNIT_TABLE, s)
def parse_resolution(s):
if s is None:
return {}
mobj = re.search(r'\b(?P<w>\d+)\s*[xX×]\s*(?P<h>\d+)\b', s)
if mobj:
return {
'width': int(mobj.group('w')),
'height': int(mobj.group('h')),
}
mobj = re.search(r'\b(\d+)[pPiI]\b', s)
if mobj:
return {'height': int(mobj.group(1))}
mobj = re.search(r'\b([48])[kK]\b', s)
if mobj:
return {'height': int(mobj.group(1)) * 540}
return {}
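# Illustrative usage for parse_count and parse_resolution above (sketch):
#   parse_count('1.5M')            # -> 1500000
#   parse_resolution('1920x1080')  # -> {'width': 1920, 'height': 1080}
#   parse_resolution('720p')       # -> {'height': 720}
#   parse_resolution('4K')         # -> {'height': 2160}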
def parse_bitrate(s):
if not isinstance(s, compat_str):
return
mobj = re.search(r'\b(\d+)\s*kbps', s)
if mobj:
return int(mobj.group(1))
def month_by_name(name, lang='en'):
""" Return the number of a month by (locale-independently) English name """
month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])
try:
return month_names.index(name) + 1
except ValueError:
return None
def month_by_abbreviation(abbrev):
""" Return the number of a month by (locale-independently) English
abbreviations """
try:
return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
except ValueError:
return None
def fix_xml_ampersands(xml_str):
"""Replace all the '&' by '&' in XML"""
return re.sub(
r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
'&',
xml_str)
def setproctitle(title):
assert isinstance(title, compat_str)
# ctypes in Jython is not complete
# http://bugs.jython.org/issue2148
if sys.platform.startswith('java'):
return
try:
libc = ctypes.cdll.LoadLibrary('libc.so.6')
except OSError:
return
except TypeError:
# LoadLibrary in Windows Python 2.7.13 only expects
# a bytestring, but since unicode_literals turns
# every string into a unicode string, it fails.
return
title_bytes = title.encode('utf-8')
buf = ctypes.create_string_buffer(len(title_bytes))
buf.value = title_bytes
try:
libc.prctl(15, buf, 0, 0, 0)
except AttributeError:
return # Strange libc, just skip this
def remove_start(s, start):
return s[len(start):] if s is not None and s.startswith(start) else s
def remove_end(s, end):
return s[:-len(end)] if s is not None and s.endswith(end) else s
def remove_quotes(s):
if s is None or len(s) < 2:
return s
for quote in ('"', "'", ):
if s[0] == quote and s[-1] == quote:
return s[1:-1]
return s
def url_basename(url):
path = compat_urlparse.urlparse(url).path
return path.strip('/').split('/')[-1]
def base_url(url):
return re.match(r'https?://[^?#&]+/', url).group()
def urljoin(base, path):
if isinstance(path, bytes):
path = path.decode('utf-8')
if not isinstance(path, compat_str) or not path:
return None
if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path):
return path
if isinstance(base, bytes):
base = base.decode('utf-8')
if not isinstance(base, compat_str) or not re.match(
r'^(?:https?:)?//', base):
return None
return compat_urlparse.urljoin(base, path)
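# Illustrative usage for urljoin (sketch; absolute paths and protocol-relative
# URLs are handled, anything that is not a usable base/path yields None):
#   urljoin('http://foo.de/', '/a/b/c.txt')   # -> 'http://foo.de/a/b/c.txt'
#   urljoin('http://foo.de/a/b/', 'c.txt')    # -> 'http://foo.de/a/b/c.txt'
#   urljoin('http://foo.de/', '//foo.com/x')  # -> '//foo.com/x'
#   urljoin(None, 'c.txt')                    # -> None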
class HEADRequest(compat_urllib_request.Request):
def get_method(self):
return 'HEAD'
class PUTRequest(compat_urllib_request.Request):
def get_method(self):
return 'PUT'
def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
if get_attr:
if v is not None:
v = getattr(v, get_attr, None)
if v == '':
v = None
if v is None:
return default
try:
return int(v) * invscale // scale
except (ValueError, TypeError):
return default
def str_or_none(v, default=None):
return default if v is None else compat_str(v)
def str_to_int(int_str):
""" A more relaxed version of int_or_none """
if isinstance(int_str, compat_integer_types):
return int_str
elif isinstance(int_str, compat_str):
int_str = re.sub(r'[,\.\+]', '', int_str)
return int_or_none(int_str)
def float_or_none(v, scale=1, invscale=1, default=None):
if v is None:
return default
try:
return float(v) * invscale / scale
except (ValueError, TypeError):
return default
def bool_or_none(v, default=None):
return v if isinstance(v, bool) else default
def strip_or_none(v, default=None):
return v.strip() if isinstance(v, compat_str) else default
def url_or_none(url):
if not url or not isinstance(url, compat_str):
return None
url = url.strip()
return url if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url) else None
def parse_duration(s):
if not isinstance(s, compat_basestring):
return None
s = s.strip()
days, hours, mins, secs, ms = [None] * 5
m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s)
if m:
days, hours, mins, secs, ms = m.groups()
else:
m = re.match(
r'''(?ix)(?:P?
(?:
[0-9]+\s*y(?:ears?)?\s*
)?
(?:
[0-9]+\s*m(?:onths?)?\s*
)?
(?:
[0-9]+\s*w(?:eeks?)?\s*
)?
(?:
(?P<days>[0-9]+)\s*d(?:ays?)?\s*
)?
T)?
(?:
(?P<hours>[0-9]+)\s*h(?:ours?)?\s*
)?
(?:
(?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
)?
(?:
(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
)?Z?$''', s)
if m:
days, hours, mins, secs, ms = m.groups()
else:
m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
if m:
hours, mins = m.groups()
else:
return None
duration = 0
if secs:
duration += float(secs)
if mins:
duration += float(mins) * 60
if hours:
duration += float(hours) * 60 * 60
if days:
duration += float(days) * 24 * 60 * 60
if ms:
duration += float(ms)
return duration
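# Illustrative usage for parse_duration (sketch; both clock-style and
# "Xh Ym Zs"-style strings are accepted, the result is in seconds):
#   parse_duration('01:02:03.05')  # -> 3723.05
#   parse_duration('3 min')        # -> 180
#   parse_duration('not a time')   # -> None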
def prepend_extension(filename, ext, expected_real_ext=None):
name, real_ext = os.path.splitext(filename)
return (
'{0}.{1}{2}'.format(name, ext, real_ext)
if not expected_real_ext or real_ext[1:] == expected_real_ext
else '{0}.{1}'.format(filename, ext))
def replace_extension(filename, ext, expected_real_ext=None):
name, real_ext = os.path.splitext(filename)
return '{0}.{1}'.format(
name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
ext)
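# Illustrative usage for prepend_extension and replace_extension (sketch):
#   prepend_extension('abc.ext', 'temp')  # -> 'abc.temp.ext'
#   replace_extension('abc.ext', 'temp')  # -> 'abc.temp'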
def check_executable(exe, args=[]):
""" Checks if the given binary is installed somewhere in PATH, and returns its name.
args can be a list of arguments for a short output (like -version) """
try:
subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
except OSError:
return False
return exe
def get_exe_version(exe, args=['--version'],
version_re=None, unrecognized='present'):
""" Returns the version of the specified executable,
or False if the executable is not present """
try:
# STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
# SIGTTOU if youtube-dl is run in the background.
# See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
out, _ = subprocess.Popen(
[encodeArgument(exe)] + args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
except OSError:
return False
if isinstance(out, bytes): # Python 2.x
out = out.decode('ascii', 'ignore')
return detect_exe_version(out, version_re, unrecognized)
def detect_exe_version(output, version_re=None, unrecognized='present'):
assert isinstance(output, compat_str)
if version_re is None:
version_re = r'version\s+([-0-9._a-zA-Z]+)'
m = re.search(version_re, output)
if m:
return m.group(1)
else:
return unrecognized
class PagedList(object):
def __len__(self):
# This is only useful for tests
return len(self.getslice())
class OnDemandPagedList(PagedList):
def __init__(self, pagefunc, pagesize, use_cache=True):
self._pagefunc = pagefunc
self._pagesize = pagesize
self._use_cache = use_cache
if use_cache:
self._cache = {}
def getslice(self, start=0, end=None):
res = []
for pagenum in itertools.count(start // self._pagesize):
firstid = pagenum * self._pagesize
nextfirstid = pagenum * self._pagesize + self._pagesize
if start >= nextfirstid:
continue
page_results = None
if self._use_cache:
page_results = self._cache.get(pagenum)
if page_results is None:
page_results = list(self._pagefunc(pagenum))
if self._use_cache:
self._cache[pagenum] = page_results
startv = (
start % self._pagesize
if firstid <= start < nextfirstid
else 0)
endv = (
((end - 1) % self._pagesize) + 1
if (end is not None and firstid <= end <= nextfirstid)
else None)
if startv != 0 or endv is not None:
page_results = page_results[startv:endv]
res.extend(page_results)
            # A little optimization - if the current page is not "full", i.e. does
            # not contain page_size videos, then we can assume that this page
            # is the last one - there are no more ids on further pages -
            # i.e. no need to query again.
if len(page_results) + startv < self._pagesize:
break
# If we got the whole page, but the next page is not interesting,
# break out early as well
if end == nextfirstid:
break
return res
class InAdvancePagedList(PagedList):
def __init__(self, pagefunc, pagecount, pagesize):
self._pagefunc = pagefunc
self._pagecount = pagecount
self._pagesize = pagesize
def getslice(self, start=0, end=None):
res = []
start_page = start // self._pagesize
end_page = (
self._pagecount if end is None else (end // self._pagesize + 1))
skip_elems = start - start_page * self._pagesize
only_more = None if end is None else end - start
for pagenum in range(start_page, end_page):
page = list(self._pagefunc(pagenum))
if skip_elems:
page = page[skip_elems:]
skip_elems = None
if only_more is not None:
if len(page) < only_more:
only_more -= len(page)
else:
page = page[:only_more]
res.extend(page)
break
res.extend(page)
return res
def uppercase_escape(s):
unicode_escape = codecs.getdecoder('unicode_escape')
return re.sub(
r'\\U[0-9a-fA-F]{8}',
lambda m: unicode_escape(m.group(0))[0],
s)
def lowercase_escape(s):
unicode_escape = codecs.getdecoder('unicode_escape')
return re.sub(
r'\\u[0-9a-fA-F]{4}',
lambda m: unicode_escape(m.group(0))[0],
s)
def escape_rfc3986(s):
"""Escape non-ASCII characters as suggested by RFC 3986"""
if sys.version_info < (3, 0) and isinstance(s, compat_str):
s = s.encode('utf-8')
return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
def escape_url(url):
"""Escape URL as suggested by RFC 3986"""
url_parsed = compat_urllib_parse_urlparse(url)
return url_parsed._replace(
netloc=url_parsed.netloc.encode('idna').decode('ascii'),
path=escape_rfc3986(url_parsed.path),
params=escape_rfc3986(url_parsed.params),
query=escape_rfc3986(url_parsed.query),
fragment=escape_rfc3986(url_parsed.fragment)
).geturl()
def read_batch_urls(batch_fd):
def fixup(url):
if not isinstance(url, compat_str):
url = url.decode('utf-8', 'replace')
BOM_UTF8 = '\xef\xbb\xbf'
if url.startswith(BOM_UTF8):
url = url[len(BOM_UTF8):]
url = url.strip()
if url.startswith(('#', ';', ']')):
return False
return url
with contextlib.closing(batch_fd) as fd:
return [url for url in map(fixup, fd) if url]
def urlencode_postdata(*args, **kargs):
return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')
def update_url_query(url, query):
if not query:
return url
parsed_url = compat_urlparse.urlparse(url)
qs = compat_parse_qs(parsed_url.query)
qs.update(query)
return compat_urlparse.urlunparse(parsed_url._replace(
query=compat_urllib_parse_urlencode(qs, True)))
def update_Request(req, url=None, data=None, headers={}, query={}):
req_headers = req.headers.copy()
req_headers.update(headers)
req_data = data or req.data
req_url = update_url_query(url or req.get_full_url(), query)
req_get_method = req.get_method()
if req_get_method == 'HEAD':
req_type = HEADRequest
elif req_get_method == 'PUT':
req_type = PUTRequest
else:
req_type = compat_urllib_request.Request
new_req = req_type(
req_url, data=req_data, headers=req_headers,
origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
if hasattr(req, 'timeout'):
new_req.timeout = req.timeout
return new_req
def _multipart_encode_impl(data, boundary):
content_type = 'multipart/form-data; boundary=%s' % boundary
out = b''
for k, v in data.items():
out += b'--' + boundary.encode('ascii') + b'\r\n'
if isinstance(k, compat_str):
k = k.encode('utf-8')
if isinstance(v, compat_str):
v = v.encode('utf-8')
# RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
# suggests sending UTF-8 directly. Firefox sends UTF-8, too
content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
if boundary.encode('ascii') in content:
raise ValueError('Boundary overlaps with data')
out += content
out += b'--' + boundary.encode('ascii') + b'--\r\n'
return out, content_type
def multipart_encode(data, boundary=None):
'''
Encode a dict to RFC 7578-compliant form-data
data:
A dict where keys and values can be either Unicode or bytes-like
objects.
boundary:
If specified a Unicode object, it's used as the boundary. Otherwise
a random boundary is generated.
Reference: https://tools.ietf.org/html/rfc7578
'''
has_specified_boundary = boundary is not None
while True:
if boundary is None:
boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))
try:
out, content_type = _multipart_encode_impl(data, boundary)
break
except ValueError:
if has_specified_boundary:
raise
boundary = None
return out, content_type
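# Illustrative usage for multipart_encode (sketch; the returned content type must
# be sent along with the body, since it carries the chosen boundary):
#   body, content_type = multipart_encode({'field': 'value'})
#   # body can then be used as POST data together with
#   # headers={'Content-Type': content_type}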
def dict_get(d, key_or_keys, default=None, skip_false_values=True):
if isinstance(key_or_keys, (list, tuple)):
for key in key_or_keys:
if key not in d or d[key] is None or skip_false_values and not d[key]:
continue
return d[key]
return default
return d.get(key_or_keys, default)
def try_get(src, getter, expected_type=None):
if not isinstance(getter, (list, tuple)):
getter = [getter]
for get in getter:
try:
v = get(src)
except (AttributeError, KeyError, TypeError, IndexError):
pass
else:
if expected_type is None or isinstance(v, expected_type):
return v
def merge_dicts(*dicts):
merged = {}
for a_dict in dicts:
for k, v in a_dict.items():
if v is None:
continue
if (k not in merged
or (isinstance(v, compat_str) and v
and isinstance(merged[k], compat_str)
and not merged[k])):
merged[k] = v
return merged
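# Illustrative usage for dict_get, try_get and merge_dicts (sketch):
#   dict_get({'a': None, 'b': 'x'}, ('a', 'b'))           # -> 'x' (None/falsy values are skipped)
#   try_get({'a': {'b': 3}}, lambda x: x['a']['b'], int)  # -> 3 (exceptions yield None)
#   merge_dicts({'title': ''}, {'title': 'x'})            # -> {'title': 'x'} (empty strings lose)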
def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)
US_RATINGS = {
'G': 0,
'PG': 10,
'PG-13': 13,
'R': 16,
'NC': 18,
}
TV_PARENTAL_GUIDELINES = {
'TV-Y': 0,
'TV-Y7': 7,
'TV-G': 0,
'TV-PG': 0,
'TV-14': 14,
'TV-MA': 17,
}
def parse_age_limit(s):
if type(s) == int:
return s if 0 <= s <= 21 else None
if not isinstance(s, compat_basestring):
return None
m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
if m:
return int(m.group('age'))
if s in US_RATINGS:
return US_RATINGS[s]
m = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s)
if m:
return TV_PARENTAL_GUIDELINES['TV-' + m.group(1)]
return None
def strip_jsonp(code):
return re.sub(
r'''(?sx)^
(?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
(?:\s*&&\s*(?P=func_name))?
\s*\(\s*(?P<callback_data>.*)\);?
\s*?(?://[^\n]*)*$''',
r'\g<callback_data>', code)
def js_to_json(code):
COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*'
SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE)
INTEGER_TABLE = (
(r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16),
(r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8),
)
def fix_kv(m):
v = m.group(0)
if v in ('true', 'false', 'null'):
return v
elif v.startswith('/*') or v.startswith('//') or v.startswith('!') or v == ',':
return ""
if v[0] in ("'", '"'):
v = re.sub(r'(?s)\\.|"', lambda m: {
'"': '\\"',
"\\'": "'",
'\\\n': '',
'\\x': '\\u00',
}.get(m.group(0), m.group(0)), v[1:-1])
else:
for regex, base in INTEGER_TABLE:
im = re.match(regex, v)
if im:
i = int(im.group(1), base)
return '"%d":' % i if v.endswith(':') else '%d' % i
return '"%s"' % v
return re.sub(r'''(?sx)
"(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
'(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
{comment}|,(?={skip}[\]}}])|
(?:(?<![0-9])[eE]|[a-df-zA-DF-Z_])[.a-zA-Z_0-9]*|
\b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
[0-9]+(?={skip}:)|
!+
'''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)
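# Illustrative usage for js_to_json (sketch; single quotes, unquoted keys and
# JavaScript-style comments are normalized into strict JSON):
#   js_to_json("{'clip':{'provider':'pseudo'}}")  # -> '{"clip":{"provider":"pseudo"}}'
#   js_to_json('{abc: true}')                     # -> '{"abc": true}'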
def qualities(quality_ids):
""" Get a numeric quality value out of a list of possible values """
def q(qid):
try:
return quality_ids.index(qid)
except ValueError:
return -1
return q
DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'
def limit_length(s, length):
""" Add ellipses to overly long strings """
if s is None:
return None
ELLIPSES = '...'
if len(s) > length:
return s[:length - len(ELLIPSES)] + ELLIPSES
return s
def version_tuple(v):
return tuple(int(e) for e in re.split(r'[-.]', v))
def is_outdated_version(version, limit, assume_new=True):
if not version:
return not assume_new
try:
return version_tuple(version) < version_tuple(limit)
except ValueError:
return not assume_new
def ytdl_is_updateable():
""" Returns if youtube-dl can be updated with -U """
from zipimport import zipimporter
return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')
def args_to_str(args):
# Get a short string representation for a subprocess command
return ' '.join(compat_shlex_quote(a) for a in args)
def error_to_compat_str(err):
err_str = str(err)
    # On Python 2 the error byte string must be decoded with the proper
    # encoding rather than ascii
if sys.version_info[0] < 3:
err_str = err_str.decode(preferredencoding())
return err_str
def mimetype2ext(mt):
if mt is None:
return None
ext = {
'audio/mp4': 'm4a',
# Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
# it's the most popular one
'audio/mpeg': 'mp3',
}.get(mt)
if ext is not None:
return ext
_, _, res = mt.rpartition('/')
res = res.split(';')[0].strip().lower()
return {
'3gpp': '3gp',
'smptett+xml': 'tt',
'ttaf+xml': 'dfxp',
'ttml+xml': 'ttml',
'x-flv': 'flv',
'x-mp4-fragmented': 'mp4',
'x-ms-sami': 'sami',
'x-ms-wmv': 'wmv',
'mpegurl': 'm3u8',
'x-mpegurl': 'm3u8',
'vnd.apple.mpegurl': 'm3u8',
'dash+xml': 'mpd',
'f4m+xml': 'f4m',
'hds+xml': 'f4m',
'vnd.ms-sstr+xml': 'ism',
'quicktime': 'mov',
'mp2t': 'ts',
'x-wav': 'wav',
}.get(res, res)
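# Illustrative usage for mimetype2ext (sketch; explicit table entries first, then
# the subtype map, then the bare subtype as a fallback):
#   mimetype2ext('audio/mp4')               # -> 'm4a'
#   mimetype2ext('application/x-mpegurl')   # -> 'm3u8'
#   mimetype2ext('text/vtt;charset=UTF-8')  # -> 'vtt'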
def parse_codecs(codecs_str):
# http://tools.ietf.org/html/rfc6381
if not codecs_str:
return {}
split_codecs = list(filter(None, map(
lambda str: str.strip(), codecs_str.strip().strip(',').split(','))))
vcodec, acodec = None, None
for full_codec in split_codecs:
codec = full_codec.split('.')[0]
if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v', 'hvc1', 'av01', 'theora'):
if not vcodec:
vcodec = full_codec
elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
if not acodec:
acodec = full_codec
else:
write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
if not vcodec and not acodec:
if len(split_codecs) == 2:
return {
'vcodec': split_codecs[0],
'acodec': split_codecs[1],
}
else:
return {
'vcodec': vcodec or 'none',
'acodec': acodec or 'none',
}
return {}
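# Illustrative usage for parse_codecs (sketch; an RFC 6381 codecs string is split
# into video and audio codec fields):
#   parse_codecs('avc1.77.30, mp4a.40.2')  # -> {'vcodec': 'avc1.77.30', 'acodec': 'mp4a.40.2'}
#   parse_codecs('opus')                   # -> {'vcodec': 'none', 'acodec': 'opus'}
#   parse_codecs('')                       # -> {}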
def urlhandle_detect_ext(url_handle):
getheader = url_handle.headers.get
cd = getheader('Content-Disposition')
if cd:
m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
if m:
e = determine_ext(m.group('filename'), default_ext=None)
if e:
return e
return mimetype2ext(getheader('Content-Type'))
def encode_data_uri(data, mime_type):
return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))
def age_restricted(content_limit, age_limit):
""" Returns True iff the content should be blocked """
if age_limit is None: # No limit set
return False
if content_limit is None:
return False # Content available for everyone
return age_limit < content_limit
def is_html(first_bytes):
""" Detect whether a file contains HTML by examining its first bytes. """
BOMS = [
(b'\xef\xbb\xbf', 'utf-8'),
(b'\x00\x00\xfe\xff', 'utf-32-be'),
(b'\xff\xfe\x00\x00', 'utf-32-le'),
(b'\xff\xfe', 'utf-16-le'),
(b'\xfe\xff', 'utf-16-be'),
]
for bom, enc in BOMS:
if first_bytes.startswith(bom):
s = first_bytes[len(bom):].decode(enc, 'replace')
break
else:
s = first_bytes.decode('utf-8', 'replace')
return re.match(r'^\s*<', s)
def determine_protocol(info_dict):
protocol = info_dict.get('protocol')
if protocol is not None:
return protocol
url = info_dict['url']
if url.startswith('rtmp'):
return 'rtmp'
elif url.startswith('mms'):
return 'mms'
elif url.startswith('rtsp'):
return 'rtsp'
ext = determine_ext(url)
if ext == 'm3u8':
return 'm3u8'
elif ext == 'f4m':
return 'f4m'
return compat_urllib_parse_urlparse(url).scheme
def render_table(header_row, data):
""" Render a list of rows, each as a list of values """
table = [header_row] + data
max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)]
format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s'
return '\n'.join(format_str % tuple(row) for row in table)
def _match_one(filter_part, dct):
COMPARISON_OPERATORS = {
'<': operator.lt,
'<=': operator.le,
'>': operator.gt,
'>=': operator.ge,
'=': operator.eq,
'!=': operator.ne,
}
operator_rex = re.compile(r'''(?x)\s*
(?P<key>[a-z_]+)
\s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
(?:
(?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
(?P<quote>["\'])(?P<quotedstrval>(?:\\.|(?!(?P=quote)|\\).)+?)(?P=quote)|
(?P<strval>(?![0-9.])[a-z0-9A-Z]*)
)
\s*$
''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
m = operator_rex.search(filter_part)
if m:
op = COMPARISON_OPERATORS[m.group('op')]
actual_value = dct.get(m.group('key'))
if (m.group('quotedstrval') is not None
or m.group('strval') is not None
            # If the original field is a string and the matching comparison value is
            # a number, we should respect the origin of the original field
# and process comparison value as a string (see
# https://github.com/ytdl-org/youtube-dl/issues/11082).
or actual_value is not None and m.group('intval') is not None
and isinstance(actual_value, compat_str)):
if m.group('op') not in ('=', '!='):
raise ValueError(
'Operator %s does not support string values!' % m.group('op'))
comparison_value = m.group('quotedstrval') or m.group('strval') or m.group('intval')
quote = m.group('quote')
if quote is not None:
comparison_value = comparison_value.replace(r'\%s' % quote, quote)
else:
try:
comparison_value = int(m.group('intval'))
except ValueError:
comparison_value = parse_filesize(m.group('intval'))
if comparison_value is None:
comparison_value = parse_filesize(m.group('intval') + 'B')
if comparison_value is None:
raise ValueError(
'Invalid integer value %r in filter part %r' % (
m.group('intval'), filter_part))
if actual_value is None:
return m.group('none_inclusive')
return op(actual_value, comparison_value)
UNARY_OPERATORS = {
'': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
'!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
}
operator_rex = re.compile(r'''(?x)\s*
(?P<op>%s)\s*(?P<key>[a-z_]+)
\s*$
''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
m = operator_rex.search(filter_part)
if m:
op = UNARY_OPERATORS[m.group('op')]
actual_value = dct.get(m.group('key'))
return op(actual_value)
raise ValueError('Invalid filter part %r' % filter_part)
def match_str(filter_str, dct):
""" Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """
return all(
_match_one(filter_part, dct) for filter_part in filter_str.split('&'))
def match_filter_func(filter_str):
def _match_func(info_dict):
if match_str(filter_str, info_dict):
return None
else:
video_title = info_dict.get('title', info_dict.get('id', 'video'))
return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
return _match_func
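# Illustrative usage for match_str (sketch; '&' joins conditions, a trailing '?'
# after the operator makes missing fields pass):
#   match_str('like_count > 100', {'like_count': 190})  # -> True
#   match_str('like_count > 100', {'like_count': 90})   # -> False
#   match_str('dislike_count <? 50', {})                # -> True (field missing, '?' present)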
def parse_dfxp_time_expr(time_expr):
if not time_expr:
return
mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
if mobj:
return float(mobj.group('time_offset'))
mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
if mobj:
return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
def srt_subtitles_timecode(seconds):
return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000)
def dfxp2srt(dfxp_data):
'''
@param dfxp_data A bytes-like object containing DFXP data
@returns A unicode object containing converted SRT data
'''
LEGACY_NAMESPACES = (
(b'http://www.w3.org/ns/ttml', [
b'http://www.w3.org/2004/11/ttaf1',
b'http://www.w3.org/2006/04/ttaf1',
b'http://www.w3.org/2006/10/ttaf1',
]),
(b'http://www.w3.org/ns/ttml#styling', [
b'http://www.w3.org/ns/ttml#style',
]),
)
SUPPORTED_STYLING = [
'color',
'fontFamily',
'fontSize',
'fontStyle',
'fontWeight',
'textDecoration'
]
_x = functools.partial(xpath_with_ns, ns_map={
'xml': 'http://www.w3.org/XML/1998/namespace',
'ttml': 'http://www.w3.org/ns/ttml',
'tts': 'http://www.w3.org/ns/ttml#styling',
})
styles = {}
default_style = {}
class TTMLPElementParser(object):
_out = ''
_unclosed_elements = []
_applied_styles = []
def start(self, tag, attrib):
if tag in (_x('ttml:br'), 'br'):
self._out += '\n'
else:
unclosed_elements = []
style = {}
element_style_id = attrib.get('style')
if default_style:
style.update(default_style)
if element_style_id:
style.update(styles.get(element_style_id, {}))
for prop in SUPPORTED_STYLING:
prop_val = attrib.get(_x('tts:' + prop))
if prop_val:
style[prop] = prop_val
if style:
font = ''
for k, v in sorted(style.items()):
if self._applied_styles and self._applied_styles[-1].get(k) == v:
continue
if k == 'color':
font += ' color="%s"' % v
elif k == 'fontSize':
font += ' size="%s"' % v
elif k == 'fontFamily':
font += ' face="%s"' % v
elif k == 'fontWeight' and v == 'bold':
self._out += '<b>'
unclosed_elements.append('b')
elif k == 'fontStyle' and v == 'italic':
self._out += '<i>'
unclosed_elements.append('i')
elif k == 'textDecoration' and v == 'underline':
self._out += '<u>'
unclosed_elements.append('u')
if font:
self._out += '<font' + font + '>'
unclosed_elements.append('font')
applied_style = {}
if self._applied_styles:
applied_style.update(self._applied_styles[-1])
applied_style.update(style)
self._applied_styles.append(applied_style)
self._unclosed_elements.append(unclosed_elements)
def end(self, tag):
if tag not in (_x('ttml:br'), 'br'):
unclosed_elements = self._unclosed_elements.pop()
for element in reversed(unclosed_elements):
self._out += '</%s>' % element
if unclosed_elements and self._applied_styles:
self._applied_styles.pop()
def data(self, data):
self._out += data
def close(self):
return self._out.strip()
def parse_node(node):
target = TTMLPElementParser()
parser = xml.etree.ElementTree.XMLParser(target=target)
parser.feed(xml.etree.ElementTree.tostring(node))
return parser.close()
for k, v in LEGACY_NAMESPACES:
for ns in v:
dfxp_data = dfxp_data.replace(ns, k)
dfxp = compat_etree_fromstring(dfxp_data)
out = []
paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')
if not paras:
raise ValueError('Invalid dfxp/TTML subtitle')
repeat = False
while True:
for style in dfxp.findall(_x('.//ttml:style')):
style_id = style.get('id') or style.get(_x('xml:id'))
if not style_id:
continue
parent_style_id = style.get('style')
if parent_style_id:
if parent_style_id not in styles:
repeat = True
continue
styles[style_id] = styles[parent_style_id].copy()
for prop in SUPPORTED_STYLING:
prop_val = style.get(_x('tts:' + prop))
if prop_val:
styles.setdefault(style_id, {})[prop] = prop_val
if repeat:
repeat = False
else:
break
for p in ('body', 'div'):
ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
if ele is None:
continue
style = styles.get(ele.get('style'))
if not style:
continue
default_style.update(style)
for para, index in zip(paras, itertools.count(1)):
begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
end_time = parse_dfxp_time_expr(para.attrib.get('end'))
dur = parse_dfxp_time_expr(para.attrib.get('dur'))
if begin_time is None:
continue
if not end_time:
if not dur:
continue
end_time = begin_time + dur
out.append('%d\n%s --> %s\n%s\n\n' % (
index,
srt_subtitles_timecode(begin_time),
srt_subtitles_timecode(end_time),
parse_node(para)))
return ''.join(out)
def cli_option(params, command_option, param):
param = params.get(param)
if param:
param = compat_str(param)
return [command_option, param] if param is not None else []
def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
param = params.get(param)
if param is None:
return []
assert isinstance(param, bool)
if separator:
return [command_option + separator + (true_value if param else false_value)]
return [command_option, true_value if param else false_value]
def cli_valueless_option(params, command_option, param, expected_value=True):
param = params.get(param)
return [command_option] if param == expected_value else []
def cli_configuration_args(params, param, default=[]):
ex_args = params.get(param)
if ex_args is None:
return default
assert isinstance(ex_args, list)
return ex_args
class ISO639Utils(object):
# See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
_lang_map = {
'aa': 'aar',
'ab': 'abk',
'ae': 'ave',
'af': 'afr',
'ak': 'aka',
'am': 'amh',
'an': 'arg',
'ar': 'ara',
'as': 'asm',
'av': 'ava',
'ay': 'aym',
'az': 'aze',
'ba': 'bak',
'be': 'bel',
'bg': 'bul',
'bh': 'bih',
'bi': 'bis',
'bm': 'bam',
'bn': 'ben',
'bo': 'bod',
'br': 'bre',
'bs': 'bos',
'ca': 'cat',
'ce': 'che',
'ch': 'cha',
'co': 'cos',
'cr': 'cre',
'cs': 'ces',
'cu': 'chu',
'cv': 'chv',
'cy': 'cym',
'da': 'dan',
'de': 'deu',
'dv': 'div',
'dz': 'dzo',
'ee': 'ewe',
'el': 'ell',
'en': 'eng',
'eo': 'epo',
'es': 'spa',
'et': 'est',
'eu': 'eus',
'fa': 'fas',
'ff': 'ful',
'fi': 'fin',
'fj': 'fij',
'fo': 'fao',
'fr': 'fra',
'fy': 'fry',
'ga': 'gle',
'gd': 'gla',
'gl': 'glg',
'gn': 'grn',
'gu': 'guj',
'gv': 'glv',
'ha': 'hau',
'he': 'heb',
'iw': 'heb', # Replaced by he in 1989 revision
'hi': 'hin',
'ho': 'hmo',
'hr': 'hrv',
'ht': 'hat',
'hu': 'hun',
'hy': 'hye',
'hz': 'her',
'ia': 'ina',
'id': 'ind',
'in': 'ind', # Replaced by id in 1989 revision
'ie': 'ile',
'ig': 'ibo',
'ii': 'iii',
'ik': 'ipk',
'io': 'ido',
'is': 'isl',
'it': 'ita',
'iu': 'iku',
'ja': 'jpn',
'jv': 'jav',
'ka': 'kat',
'kg': 'kon',
'ki': 'kik',
'kj': 'kua',
'kk': 'kaz',
'kl': 'kal',
'km': 'khm',
'kn': 'kan',
'ko': 'kor',
'kr': 'kau',
'ks': 'kas',
'ku': 'kur',
'kv': 'kom',
'kw': 'cor',
'ky': 'kir',
'la': 'lat',
'lb': 'ltz',
'lg': 'lug',
'li': 'lim',
'ln': 'lin',
'lo': 'lao',
'lt': 'lit',
'lu': 'lub',
'lv': 'lav',
'mg': 'mlg',
'mh': 'mah',
'mi': 'mri',
'mk': 'mkd',
'ml': 'mal',
'mn': 'mon',
'mr': 'mar',
'ms': 'msa',
'mt': 'mlt',
'my': 'mya',
'na': 'nau',
'nb': 'nob',
'nd': 'nde',
'ne': 'nep',
'ng': 'ndo',
'nl': 'nld',
'nn': 'nno',
'no': 'nor',
'nr': 'nbl',
'nv': 'nav',
'ny': 'nya',
'oc': 'oci',
'oj': 'oji',
'om': 'orm',
'or': 'ori',
'os': 'oss',
'pa': 'pan',
'pi': 'pli',
'pl': 'pol',
'ps': 'pus',
'pt': 'por',
'qu': 'que',
'rm': 'roh',
'rn': 'run',
'ro': 'ron',
'ru': 'rus',
'rw': 'kin',
'sa': 'san',
'sc': 'srd',
'sd': 'snd',
'se': 'sme',
'sg': 'sag',
'si': 'sin',
'sk': 'slk',
'sl': 'slv',
'sm': 'smo',
'sn': 'sna',
'so': 'som',
'sq': 'sqi',
'sr': 'srp',
'ss': 'ssw',
'st': 'sot',
'su': 'sun',
'sv': 'swe',
'sw': 'swa',
'ta': 'tam',
'te': 'tel',
'tg': 'tgk',
'th': 'tha',
'ti': 'tir',
'tk': 'tuk',
'tl': 'tgl',
'tn': 'tsn',
'to': 'ton',
'tr': 'tur',
'ts': 'tso',
'tt': 'tat',
'tw': 'twi',
'ty': 'tah',
'ug': 'uig',
'uk': 'ukr',
'ur': 'urd',
'uz': 'uzb',
've': 'ven',
'vi': 'vie',
'vo': 'vol',
'wa': 'wln',
'wo': 'wol',
'xh': 'xho',
'yi': 'yid',
'ji': 'yid', # Replaced by yi in 1989 revision
'yo': 'yor',
'za': 'zha',
'zh': 'zho',
'zu': 'zul',
}
@classmethod
def short2long(cls, code):
"""Convert language code from ISO 639-1 to ISO 639-2/T"""
return cls._lang_map.get(code[:2])
@classmethod
def long2short(cls, code):
"""Convert language code from ISO 639-2/T to ISO 639-1"""
for short_name, long_name in cls._lang_map.items():
if long_name == code:
return short_name
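# Example: ISO639Utils.short2long('en') == 'eng' and
# ISO639Utils.long2short('deu') == 'de'; unknown codes yield None.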
class ISO3166Utils(object):
# From http://data.okfn.org/data/core/country-list
_country_map = {
'AF': 'Afghanistan',
'AX': 'Åland Islands',
'AL': 'Albania',
'DZ': 'Algeria',
'AS': 'American Samoa',
'AD': 'Andorra',
'AO': 'Angola',
'AI': 'Anguilla',
'AQ': 'Antarctica',
'AG': 'Antigua and Barbuda',
'AR': 'Argentina',
'AM': 'Armenia',
'AW': 'Aruba',
'AU': 'Australia',
'AT': 'Austria',
'AZ': 'Azerbaijan',
'BS': 'Bahamas',
'BH': 'Bahrain',
'BD': 'Bangladesh',
'BB': 'Barbados',
'BY': 'Belarus',
'BE': 'Belgium',
'BZ': 'Belize',
'BJ': 'Benin',
'BM': 'Bermuda',
'BT': 'Bhutan',
'BO': 'Bolivia, Plurinational State of',
'BQ': 'Bonaire, Sint Eustatius and Saba',
'BA': 'Bosnia and Herzegovina',
'BW': 'Botswana',
'BV': 'Bouvet Island',
'BR': 'Brazil',
'IO': 'British Indian Ocean Territory',
'BN': 'Brunei Darussalam',
'BG': 'Bulgaria',
'BF': 'Burkina Faso',
'BI': 'Burundi',
'KH': 'Cambodia',
'CM': 'Cameroon',
'CA': 'Canada',
'CV': 'Cape Verde',
'KY': 'Cayman Islands',
'CF': 'Central African Republic',
'TD': 'Chad',
'CL': 'Chile',
'CN': 'China',
'CX': 'Christmas Island',
'CC': 'Cocos (Keeling) Islands',
'CO': 'Colombia',
'KM': 'Comoros',
'CG': 'Congo',
'CD': 'Congo, the Democratic Republic of the',
'CK': 'Cook Islands',
'CR': 'Costa Rica',
'CI': 'Côte d\'Ivoire',
'HR': 'Croatia',
'CU': 'Cuba',
'CW': 'Curaçao',
'CY': 'Cyprus',
'CZ': 'Czech Republic',
'DK': 'Denmark',
'DJ': 'Djibouti',
'DM': 'Dominica',
'DO': 'Dominican Republic',
'EC': 'Ecuador',
'EG': 'Egypt',
'SV': 'El Salvador',
'GQ': 'Equatorial Guinea',
'ER': 'Eritrea',
'EE': 'Estonia',
'ET': 'Ethiopia',
'FK': 'Falkland Islands (Malvinas)',
'FO': 'Faroe Islands',
'FJ': 'Fiji',
'FI': 'Finland',
'FR': 'France',
'GF': 'French Guiana',
'PF': 'French Polynesia',
'TF': 'French Southern Territories',
'GA': 'Gabon',
'GM': 'Gambia',
'GE': 'Georgia',
'DE': 'Germany',
'GH': 'Ghana',
'GI': 'Gibraltar',
'GR': 'Greece',
'GL': 'Greenland',
'GD': 'Grenada',
'GP': 'Guadeloupe',
'GU': 'Guam',
'GT': 'Guatemala',
'GG': 'Guernsey',
'GN': 'Guinea',
'GW': 'Guinea-Bissau',
'GY': 'Guyana',
'HT': 'Haiti',
'HM': 'Heard Island and McDonald Islands',
'VA': 'Holy See (Vatican City State)',
'HN': 'Honduras',
'HK': 'Hong Kong',
'HU': 'Hungary',
'IS': 'Iceland',
'IN': 'India',
'ID': 'Indonesia',
'IR': 'Iran, Islamic Republic of',
'IQ': 'Iraq',
'IE': 'Ireland',
'IM': 'Isle of Man',
'IL': 'Israel',
'IT': 'Italy',
'JM': 'Jamaica',
'JP': 'Japan',
'JE': 'Jersey',
'JO': 'Jordan',
'KZ': 'Kazakhstan',
'KE': 'Kenya',
'KI': 'Kiribati',
'KP': 'Korea, Democratic People\'s Republic of',
'KR': 'Korea, Republic of',
'KW': 'Kuwait',
'KG': 'Kyrgyzstan',
'LA': 'Lao People\'s Democratic Republic',
'LV': 'Latvia',
'LB': 'Lebanon',
'LS': 'Lesotho',
'LR': 'Liberia',
'LY': 'Libya',
'LI': 'Liechtenstein',
'LT': 'Lithuania',
'LU': 'Luxembourg',
'MO': 'Macao',
'MK': 'Macedonia, the Former Yugoslav Republic of',
'MG': 'Madagascar',
'MW': 'Malawi',
'MY': 'Malaysia',
'MV': 'Maldives',
'ML': 'Mali',
'MT': 'Malta',
'MH': 'Marshall Islands',
'MQ': 'Martinique',
'MR': 'Mauritania',
'MU': 'Mauritius',
'YT': 'Mayotte',
'MX': 'Mexico',
'FM': 'Micronesia, Federated States of',
'MD': 'Moldova, Republic of',
'MC': 'Monaco',
'MN': 'Mongolia',
'ME': 'Montenegro',
'MS': 'Montserrat',
'MA': 'Morocco',
'MZ': 'Mozambique',
'MM': 'Myanmar',
'NA': 'Namibia',
'NR': 'Nauru',
'NP': 'Nepal',
'NL': 'Netherlands',
'NC': 'New Caledonia',
'NZ': 'New Zealand',
'NI': 'Nicaragua',
'NE': 'Niger',
'NG': 'Nigeria',
'NU': 'Niue',
'NF': 'Norfolk Island',
'MP': 'Northern Mariana Islands',
'NO': 'Norway',
'OM': 'Oman',
'PK': 'Pakistan',
'PW': 'Palau',
'PS': 'Palestine, State of',
'PA': 'Panama',
'PG': 'Papua New Guinea',
'PY': 'Paraguay',
'PE': 'Peru',
'PH': 'Philippines',
'PN': 'Pitcairn',
'PL': 'Poland',
'PT': 'Portugal',
'PR': 'Puerto Rico',
'QA': 'Qatar',
'RE': 'Réunion',
'RO': 'Romania',
'RU': 'Russian Federation',
'RW': 'Rwanda',
'BL': 'Saint Barthélemy',
'SH': 'Saint Helena, Ascension and Tristan da Cunha',
'KN': 'Saint Kitts and Nevis',
'LC': 'Saint Lucia',
'MF': 'Saint Martin (French part)',
'PM': 'Saint Pierre and Miquelon',
'VC': 'Saint Vincent and the Grenadines',
'WS': 'Samoa',
'SM': 'San Marino',
'ST': 'Sao Tome and Principe',
'SA': 'Saudi Arabia',
'SN': 'Senegal',
'RS': 'Serbia',
'SC': 'Seychelles',
'SL': 'Sierra Leone',
'SG': 'Singapore',
'SX': 'Sint Maarten (Dutch part)',
'SK': 'Slovakia',
'SI': 'Slovenia',
'SB': 'Solomon Islands',
'SO': 'Somalia',
'ZA': 'South Africa',
'GS': 'South Georgia and the South Sandwich Islands',
'SS': 'South Sudan',
'ES': 'Spain',
'LK': 'Sri Lanka',
'SD': 'Sudan',
'SR': 'Suriname',
'SJ': 'Svalbard and Jan Mayen',
'SZ': 'Swaziland',
'SE': 'Sweden',
'CH': 'Switzerland',
'SY': 'Syrian Arab Republic',
'TW': 'Taiwan, Province of China',
'TJ': 'Tajikistan',
'TZ': 'Tanzania, United Republic of',
'TH': 'Thailand',
'TL': 'Timor-Leste',
'TG': 'Togo',
'TK': 'Tokelau',
'TO': 'Tonga',
'TT': 'Trinidad and Tobago',
'TN': 'Tunisia',
'TR': 'Turkey',
'TM': 'Turkmenistan',
'TC': 'Turks and Caicos Islands',
'TV': 'Tuvalu',
'UG': 'Uganda',
'UA': 'Ukraine',
'AE': 'United Arab Emirates',
'GB': 'United Kingdom',
'US': 'United States',
'UM': 'United States Minor Outlying Islands',
'UY': 'Uruguay',
'UZ': 'Uzbekistan',
'VU': 'Vanuatu',
'VE': 'Venezuela, Bolivarian Republic of',
'VN': 'Viet Nam',
'VG': 'Virgin Islands, British',
'VI': 'Virgin Islands, U.S.',
'WF': 'Wallis and Futuna',
'EH': 'Western Sahara',
'YE': 'Yemen',
'ZM': 'Zambia',
'ZW': 'Zimbabwe',
}
@classmethod
def short2full(cls, code):
"""Convert an ISO 3166-2 country code to the corresponding full name"""
return cls._country_map.get(code.upper())
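# Example: ISO3166Utils.short2full('de') == 'Germany' (the input is upper-cased
# before the lookup); an unknown code returns None.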
class GeoUtils(object):
# Major IPv4 address blocks per country
_country_ip_map = {
'AD': '46.172.224.0/19',
'AE': '94.200.0.0/13',
'AF': '149.54.0.0/17',
'AG': '209.59.64.0/18',
'AI': '204.14.248.0/21',
'AL': '46.99.0.0/16',
'AM': '46.70.0.0/15',
'AO': '105.168.0.0/13',
'AP': '182.50.184.0/21',
'AQ': '23.154.160.0/24',
'AR': '181.0.0.0/12',
'AS': '202.70.112.0/20',
'AT': '77.116.0.0/14',
'AU': '1.128.0.0/11',
'AW': '181.41.0.0/18',
'AX': '185.217.4.0/22',
'AZ': '5.197.0.0/16',
'BA': '31.176.128.0/17',
'BB': '65.48.128.0/17',
'BD': '114.130.0.0/16',
'BE': '57.0.0.0/8',
'BF': '102.178.0.0/15',
'BG': '95.42.0.0/15',
'BH': '37.131.0.0/17',
'BI': '154.117.192.0/18',
'BJ': '137.255.0.0/16',
'BL': '185.212.72.0/23',
'BM': '196.12.64.0/18',
'BN': '156.31.0.0/16',
'BO': '161.56.0.0/16',
'BQ': '161.0.80.0/20',
'BR': '191.128.0.0/12',
'BS': '24.51.64.0/18',
'BT': '119.2.96.0/19',
'BW': '168.167.0.0/16',
'BY': '178.120.0.0/13',
'BZ': '179.42.192.0/18',
'CA': '99.224.0.0/11',
'CD': '41.243.0.0/16',
'CF': '197.242.176.0/21',
'CG': '160.113.0.0/16',
'CH': '85.0.0.0/13',
'CI': '102.136.0.0/14',
'CK': '202.65.32.0/19',
'CL': '152.172.0.0/14',
'CM': '102.244.0.0/14',
'CN': '36.128.0.0/10',
'CO': '181.240.0.0/12',
'CR': '201.192.0.0/12',
'CU': '152.206.0.0/15',
'CV': '165.90.96.0/19',
'CW': '190.88.128.0/17',
'CY': '31.153.0.0/16',
'CZ': '88.100.0.0/14',
'DE': '53.0.0.0/8',
'DJ': '197.241.0.0/17',
'DK': '87.48.0.0/12',
'DM': '192.243.48.0/20',
'DO': '152.166.0.0/15',
'DZ': '41.96.0.0/12',
'EC': '186.68.0.0/15',
'EE': '90.190.0.0/15',
'EG': '156.160.0.0/11',
'ER': '196.200.96.0/20',
'ES': '88.0.0.0/11',
'ET': '196.188.0.0/14',
'EU': '2.16.0.0/13',
'FI': '91.152.0.0/13',
'FJ': '144.120.0.0/16',
'FK': '80.73.208.0/21',
'FM': '119.252.112.0/20',
'FO': '88.85.32.0/19',
'FR': '90.0.0.0/9',
'GA': '41.158.0.0/15',
'GB': '25.0.0.0/8',
'GD': '74.122.88.0/21',
'GE': '31.146.0.0/16',
'GF': '161.22.64.0/18',
'GG': '62.68.160.0/19',
'GH': '154.160.0.0/12',
'GI': '95.164.0.0/16',
'GL': '88.83.0.0/19',
'GM': '160.182.0.0/15',
'GN': '197.149.192.0/18',
'GP': '104.250.0.0/19',
'GQ': '105.235.224.0/20',
'GR': '94.64.0.0/13',
'GT': '168.234.0.0/16',
'GU': '168.123.0.0/16',
'GW': '197.214.80.0/20',
'GY': '181.41.64.0/18',
'HK': '113.252.0.0/14',
'HN': '181.210.0.0/16',
'HR': '93.136.0.0/13',
'HT': '148.102.128.0/17',
'HU': '84.0.0.0/14',
'ID': '39.192.0.0/10',
'IE': '87.32.0.0/12',
'IL': '79.176.0.0/13',
'IM': '5.62.80.0/20',
'IN': '117.192.0.0/10',
'IO': '203.83.48.0/21',
'IQ': '37.236.0.0/14',
'IR': '2.176.0.0/12',
'IS': '82.221.0.0/16',
'IT': '79.0.0.0/10',
'JE': '87.244.64.0/18',
'JM': '72.27.0.0/17',
'JO': '176.29.0.0/16',
'JP': '133.0.0.0/8',
'KE': '105.48.0.0/12',
'KG': '158.181.128.0/17',
'KH': '36.37.128.0/17',
'KI': '103.25.140.0/22',
'KM': '197.255.224.0/20',
'KN': '198.167.192.0/19',
'KP': '175.45.176.0/22',
'KR': '175.192.0.0/10',
'KW': '37.36.0.0/14',
'KY': '64.96.0.0/15',
'KZ': '2.72.0.0/13',
'LA': '115.84.64.0/18',
'LB': '178.135.0.0/16',
'LC': '24.92.144.0/20',
'LI': '82.117.0.0/19',
'LK': '112.134.0.0/15',
'LR': '102.183.0.0/16',
'LS': '129.232.0.0/17',
'LT': '78.56.0.0/13',
'LU': '188.42.0.0/16',
'LV': '46.109.0.0/16',
'LY': '41.252.0.0/14',
'MA': '105.128.0.0/11',
'MC': '88.209.64.0/18',
'MD': '37.246.0.0/16',
'ME': '178.175.0.0/17',
'MF': '74.112.232.0/21',
'MG': '154.126.0.0/17',
'MH': '117.103.88.0/21',
'MK': '77.28.0.0/15',
'ML': '154.118.128.0/18',
'MM': '37.111.0.0/17',
'MN': '49.0.128.0/17',
'MO': '60.246.0.0/16',
'MP': '202.88.64.0/20',
'MQ': '109.203.224.0/19',
'MR': '41.188.64.0/18',
'MS': '208.90.112.0/22',
'MT': '46.11.0.0/16',
'MU': '105.16.0.0/12',
'MV': '27.114.128.0/18',
'MW': '102.70.0.0/15',
'MX': '187.192.0.0/11',
'MY': '175.136.0.0/13',
'MZ': '197.218.0.0/15',
'NA': '41.182.0.0/16',
'NC': '101.101.0.0/18',
'NE': '197.214.0.0/18',
'NF': '203.17.240.0/22',
'NG': '105.112.0.0/12',
'NI': '186.76.0.0/15',
'NL': '145.96.0.0/11',
'NO': '84.208.0.0/13',
'NP': '36.252.0.0/15',
'NR': '203.98.224.0/19',
'NU': '49.156.48.0/22',
'NZ': '49.224.0.0/14',
'OM': '5.36.0.0/15',
'PA': '186.72.0.0/15',
'PE': '186.160.0.0/14',
'PF': '123.50.64.0/18',
'PG': '124.240.192.0/19',
'PH': '49.144.0.0/13',
'PK': '39.32.0.0/11',
'PL': '83.0.0.0/11',
'PM': '70.36.0.0/20',
'PR': '66.50.0.0/16',
'PS': '188.161.0.0/16',
'PT': '85.240.0.0/13',
'PW': '202.124.224.0/20',
'PY': '181.120.0.0/14',
'QA': '37.210.0.0/15',
'RE': '102.35.0.0/16',
'RO': '79.112.0.0/13',
'RS': '93.86.0.0/15',
'RU': '5.136.0.0/13',
'RW': '41.186.0.0/16',
'SA': '188.48.0.0/13',
'SB': '202.1.160.0/19',
'SC': '154.192.0.0/11',
'SD': '102.120.0.0/13',
'SE': '78.64.0.0/12',
'SG': '8.128.0.0/10',
'SI': '188.196.0.0/14',
'SK': '78.98.0.0/15',
'SL': '102.143.0.0/17',
'SM': '89.186.32.0/19',
'SN': '41.82.0.0/15',
'SO': '154.115.192.0/18',
'SR': '186.179.128.0/17',
'SS': '105.235.208.0/21',
'ST': '197.159.160.0/19',
'SV': '168.243.0.0/16',
'SX': '190.102.0.0/20',
'SY': '5.0.0.0/16',
'SZ': '41.84.224.0/19',
'TC': '65.255.48.0/20',
'TD': '154.68.128.0/19',
'TG': '196.168.0.0/14',
'TH': '171.96.0.0/13',
'TJ': '85.9.128.0/18',
'TK': '27.96.24.0/21',
'TL': '180.189.160.0/20',
'TM': '95.85.96.0/19',
'TN': '197.0.0.0/11',
'TO': '175.176.144.0/21',
'TR': '78.160.0.0/11',
'TT': '186.44.0.0/15',
'TV': '202.2.96.0/19',
'TW': '120.96.0.0/11',
'TZ': '156.156.0.0/14',
'UA': '37.52.0.0/14',
'UG': '102.80.0.0/13',
'US': '6.0.0.0/8',
'UY': '167.56.0.0/13',
'UZ': '84.54.64.0/18',
'VA': '212.77.0.0/19',
'VC': '207.191.240.0/21',
'VE': '186.88.0.0/13',
'VG': '66.81.192.0/20',
'VI': '146.226.0.0/16',
'VN': '14.160.0.0/11',
'VU': '202.80.32.0/20',
'WF': '117.20.32.0/21',
'WS': '202.4.32.0/19',
'YE': '134.35.0.0/16',
'YT': '41.242.116.0/22',
'ZA': '41.0.0.0/11',
'ZM': '102.144.0.0/13',
'ZW': '102.177.192.0/18',
}
@classmethod
def random_ipv4(cls, code_or_block):
if len(code_or_block) == 2:
block = cls._country_ip_map.get(code_or_block.upper())
if not block:
return None
else:
block = code_or_block
addr, preflen = block.split('/')
addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0]
addr_max = addr_min | (0xffffffff >> int(preflen))
return compat_str(socket.inet_ntoa(
compat_struct_pack('!L', random.randint(addr_min, addr_max))))
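# Example: GeoUtils.random_ipv4('DE') returns a random address within
# 53.0.0.0/8; a CIDR block such as '203.0.113.0/24' may also be passed directly.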
class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
def __init__(self, proxies=None):
# Set default handlers
for type in ('http', 'https'):
setattr(self, '%s_open' % type,
lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
meth(r, proxy, type))
compat_urllib_request.ProxyHandler.__init__(self, proxies)
def proxy_open(self, req, proxy, type):
req_proxy = req.headers.get('Ytdl-request-proxy')
if req_proxy is not None:
proxy = req_proxy
del req.headers['Ytdl-request-proxy']
if proxy == '__noproxy__':
return None # No Proxy
if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
req.add_header('Ytdl-socks-proxy', proxy)
            # youtube-dl's http/https handlers take care of wrapping the socket with SOCKS
return None
return compat_urllib_request.ProxyHandler.proxy_open(
self, req, proxy, type)
# Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
# released into Public Domain
# https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
def long_to_bytes(n, blocksize=0):
"""long_to_bytes(n:long, blocksize:int) : string
Convert a long integer to a byte string.
If optional blocksize is given and greater than zero, pad the front of the
byte string with binary zeros so that the length is a multiple of
blocksize.
"""
# after much testing, this algorithm was deemed to be the fastest
s = b''
n = int(n)
while n > 0:
s = compat_struct_pack('>I', n & 0xffffffff) + s
n = n >> 32
# strip off leading zeros
for i in range(len(s)):
if s[i] != b'\000'[0]:
break
else:
# only happens when n == 0
s = b'\000'
i = 0
s = s[i:]
# add back some pad bytes. this could be done more efficiently w.r.t. the
# de-padding being done above, but sigh...
if blocksize > 0 and len(s) % blocksize:
s = (blocksize - len(s) % blocksize) * b'\000' + s
return s
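# Example: long_to_bytes(65537) == b'\x01\x00\x01', and
# long_to_bytes(65537, 4) == b'\x00\x01\x00\x01' (front-padded to the blocksize).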
def bytes_to_long(s):
"""bytes_to_long(string) : long
Convert a byte string to a long integer.
This is (essentially) the inverse of long_to_bytes().
"""
acc = 0
length = len(s)
if length % 4:
extra = (4 - length % 4)
s = b'\000' * extra + s
length = length + extra
for i in range(0, length, 4):
acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
return acc
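# Example: bytes_to_long(b'\x01\x00\x01') == 65537, the inverse of the
# long_to_bytes() example above.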
def ohdave_rsa_encrypt(data, exponent, modulus):
'''
Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/
Input:
data: data to encrypt, bytes-like object
exponent, modulus: parameter e and N of RSA algorithm, both integer
Output: hex string of encrypted data
Limitation: supports one block encryption only
'''
payload = int(binascii.hexlify(data[::-1]), 16)
encrypted = pow(payload, exponent, modulus)
return '%x' % encrypted
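# Toy example (not a real key): with exponent 3 and modulus 33 the reversed
# payload b'\x02' becomes the integer 2, so ohdave_rsa_encrypt(b'\x02', 3, 33)
# returns '8', since pow(2, 3, 33) == 8, rendered as a hex string.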
def pkcs1pad(data, length):
"""
Padding input data with PKCS#1 scheme
@param {int[]} data input data
@param {int} length target length
@returns {int[]} padded data
"""
if len(data) > length - 11:
raise ValueError('Input data too long for PKCS#1 padding')
pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)]
return [0, 2] + pseudo_random + [0] + data
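# Example: pkcs1pad([1, 2, 3], 16) returns a 16-element list of the form
# [0, 2, <10 random bytes>, 0, 1, 2, 3]; data longer than length - 11 raises
# ValueError.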
def encode_base_n(num, n, table=None):
FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
if not table:
table = FULL_TABLE[:n]
if n > len(table):
raise ValueError('base %d exceeds table length %d' % (n, len(table)))
if num == 0:
return table[0]
ret = ''
while num:
ret = table[num % n] + ret
num = num // n
return ret
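# Example: encode_base_n(255, 16) == 'ff' and encode_base_n(35, 36) == 'z';
# a custom alphabet can be supplied via the table argument.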
def decode_packed_codes(code):
mobj = re.search(PACKED_CODES_RE, code)
obfuscated_code, base, count, symbols = mobj.groups()
base = int(base)
count = int(count)
symbols = symbols.split('|')
symbol_table = {}
while count:
count -= 1
base_n_count = encode_base_n(count, base)
symbol_table[base_n_count] = symbols[count] or base_n_count
return re.sub(
r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
obfuscated_code)
def caesar(s, alphabet, shift):
if shift == 0:
return s
l = len(alphabet)
return ''.join(
alphabet[(alphabet.index(c) + shift) % l] if c in alphabet else c
for c in s)
def rot47(s):
return caesar(s, r'''!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~''', 47)
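# Example: caesar('abc', 'abcdefghijklmnopqrstuvwxyz', 1) == 'bcd' (characters
# outside the alphabet pass through unchanged); rot47 is its own inverse,
# e.g. rot47('a') == '2' and rot47('2') == 'a'.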
def parse_m3u8_attributes(attrib):
info = {}
for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
if val.startswith('"'):
val = val[1:-1]
info[key] = val
return info
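# Example: parse_m3u8_attributes('BANDWIDTH=800000,CODECS="avc1.4d401e,mp4a.40.2"')
# == {'BANDWIDTH': '800000', 'CODECS': 'avc1.4d401e,mp4a.40.2'}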
def urshift(val, n):
return val >> n if val >= 0 else (val + 0x100000000) >> n
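# urshift() emulates JavaScript's unsigned right shift (>>>) for 32-bit values,
# e.g. urshift(-1, 1) == 0x7fffffff whereas -1 >> 1 == -1 in Python.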
# Based on png2str() written by @gdkchan and improved by @yokrysty
# Originally posted at https://github.com/ytdl-org/youtube-dl/issues/9706
def decode_png(png_data):
# Reference: https://www.w3.org/TR/PNG/
header = png_data[8:]
if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
raise IOError('Not a valid PNG file.')
int_map = {1: '>B', 2: '>H', 4: '>I'}
unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]
chunks = []
while header:
length = unpack_integer(header[:4])
header = header[4:]
chunk_type = header[:4]
header = header[4:]
chunk_data = header[:length]
header = header[length:]
header = header[4:] # Skip CRC
chunks.append({
'type': chunk_type,
'length': length,
'data': chunk_data
})
ihdr = chunks[0]['data']
width = unpack_integer(ihdr[:4])
height = unpack_integer(ihdr[4:8])
idat = b''
for chunk in chunks:
if chunk['type'] == b'IDAT':
idat += chunk['data']
if not idat:
raise IOError('Unable to read PNG data.')
decompressed_data = bytearray(zlib.decompress(idat))
stride = width * 3
pixels = []
def _get_pixel(idx):
x = idx % stride
y = idx // stride
return pixels[y][x]
for y in range(height):
basePos = y * (1 + stride)
filter_type = decompressed_data[basePos]
current_row = []
pixels.append(current_row)
for x in range(stride):
color = decompressed_data[1 + basePos + x]
basex = y * stride + x
left = 0
up = 0
if x > 2:
left = _get_pixel(basex - 3)
if y > 0:
up = _get_pixel(basex - stride)
if filter_type == 1: # Sub
color = (color + left) & 0xff
elif filter_type == 2: # Up
color = (color + up) & 0xff
elif filter_type == 3: # Average
color = (color + ((left + up) >> 1)) & 0xff
elif filter_type == 4: # Paeth
a = left
b = up
c = 0
if x > 2 and y > 0:
c = _get_pixel(basex - stride - 3)
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
color = (color + a) & 0xff
elif pb <= pc:
color = (color + b) & 0xff
else:
color = (color + c) & 0xff
current_row.append(color)
return width, height, pixels
def write_xattr(path, key, value):
# This mess below finds the best xattr tool for the job
try:
# try the pyxattr module...
import xattr
if hasattr(xattr, 'set'): # pyxattr
# Unicode arguments are not supported in python-pyxattr until
# version 0.5.0
# See https://github.com/ytdl-org/youtube-dl/issues/5498
pyxattr_required_version = '0.5.0'
if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version):
# TODO: fallback to CLI tools
raise XAttrUnavailableError(
'python-pyxattr is detected but is too old. '
'youtube-dl requires %s or above while your version is %s. '
'Falling back to other xattr implementations' % (
pyxattr_required_version, xattr.__version__))
setxattr = xattr.set
else: # xattr
setxattr = xattr.setxattr
try:
setxattr(path, key, value)
except EnvironmentError as e:
raise XAttrMetadataError(e.errno, e.strerror)
except ImportError:
if compat_os_name == 'nt':
# Write xattrs to NTFS Alternate Data Streams:
# http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
assert ':' not in key
assert os.path.exists(path)
ads_fn = path + ':' + key
try:
with open(ads_fn, 'wb') as f:
f.write(value)
except EnvironmentError as e:
raise XAttrMetadataError(e.errno, e.strerror)
else:
user_has_setfattr = check_executable('setfattr', ['--version'])
user_has_xattr = check_executable('xattr', ['-h'])
if user_has_setfattr or user_has_xattr:
value = value.decode('utf-8')
if user_has_setfattr:
executable = 'setfattr'
opts = ['-n', key, '-v', value]
elif user_has_xattr:
executable = 'xattr'
opts = ['-w', key, value]
cmd = ([encodeFilename(executable, True)]
+ [encodeArgument(o) for o in opts]
+ [encodeFilename(path, True)])
try:
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
except EnvironmentError as e:
raise XAttrMetadataError(e.errno, e.strerror)
stdout, stderr = p.communicate()
stderr = stderr.decode('utf-8', 'replace')
if p.returncode != 0:
raise XAttrMetadataError(p.returncode, stderr)
else:
                # On Unix, but none of pyxattr, setfattr or xattr is available.
if sys.platform.startswith('linux'):
raise XAttrUnavailableError(
"Couldn't find a tool to set the xattrs. "
"Install either the python 'pyxattr' or 'xattr' "
"modules, or the GNU 'attr' package "
"(which contains the 'setfattr' tool).")
else:
raise XAttrUnavailableError(
"Couldn't find a tool to set the xattrs. "
"Install either the python 'xattr' module, "
"or the 'xattr' binary.")
def random_birthday(year_field, month_field, day_field):
start_date = datetime.date(1950, 1, 1)
end_date = datetime.date(1995, 12, 31)
offset = random.randint(0, (end_date - start_date).days)
random_date = start_date + datetime.timedelta(offset)
return {
year_field: str(random_date.year),
month_field: str(random_date.month),
day_field: str(random_date.day),
}
def clean_podcast_url(url):
return re.sub(r'''(?x)
(?:
(?:
chtbl\.com/track|
media\.blubrry\.com| # https://create.blubrry.com/resources/podcast-media-download-statistics/getting-started/
play\.podtrac\.com
)/[^/]+|
(?:dts|www)\.podtrac\.com/(?:pts/)?redirect\.[0-9a-z]{3,4}| # http://analytics.podtrac.com/how-to-measure
flex\.acast\.com|
pd(?:
cn\.co| # https://podcorn.com/analytics-prefix/
st\.fm # https://podsights.com/docs/
)/e
)/''', '', url)
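# Example (hypothetical URLs): the tracking prefix is stripped, so
# clean_podcast_url('https://chtbl.com/track/12345/traffic.example.com/ep.mp3')
# == 'https://traffic.example.com/ep.mp3' and
# clean_podcast_url('https://pdst.fm/e/feeds.example.org/ep.mp3')
# == 'https://feeds.example.org/ep.mp3'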
|
unlicense
|
5feb53f7fe5d4499734ad2b629878957
| 28.434236
| 133
| 0.494294
| 2.99341
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/pornhub.py
|
1
|
26854
|
# coding: utf-8
from __future__ import unicode_literals
import functools
import itertools
import operator
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
compat_urllib_request,
)
from .openload import PhantomJSwrapper
from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
merge_dicts,
NO_DEFAULT,
orderedSet,
remove_quotes,
str_to_int,
update_url_query,
urlencode_postdata,
url_or_none,
)
class PornHubBaseIE(InfoExtractor):
_NETRC_MACHINE = 'pornhub'
def _download_webpage_handle(self, *args, **kwargs):
def dl(*args, **kwargs):
return super(PornHubBaseIE, self)._download_webpage_handle(*args, **kwargs)
ret = dl(*args, **kwargs)
if not ret:
return ret
webpage, urlh = ret
if any(re.search(p, webpage) for p in (
r'<body\b[^>]+\bonload=["\']go\(\)',
r'document\.cookie\s*=\s*["\']RNKEY=',
r'document\.location\.reload\(true\)')):
url_or_request = args[0]
url = (url_or_request.get_full_url()
if isinstance(url_or_request, compat_urllib_request.Request)
else url_or_request)
phantom = PhantomJSwrapper(self, required_version='2.0')
phantom.get(url, html=webpage)
webpage, urlh = dl(*args, **kwargs)
return webpage, urlh
def _real_initialize(self):
self._logged_in = False
def _login(self, host):
if self._logged_in:
return
site = host.split('.')[0]
        # Both sites, pornhub and pornhubpremium, have separate accounts,
        # so there should be an option to provide credentials for both.
        # At the same time, some videos are available under the same video id
        # on both sites, so we have to identify them as the same video.
        # For that purpose we keep both in the same extractor,
        # but under different netrc machines.
username, password = self._get_login_info(netrc_machine=site)
if username is None:
return
login_url = 'https://www.%s/%slogin' % (host, 'premium/' if 'premium' in host else '')
login_page = self._download_webpage(
login_url, None, 'Downloading %s login page' % site)
def is_logged(webpage):
return any(re.search(p, webpage) for p in (
r'class=["\']signOut',
r'>Sign\s+[Oo]ut\s*<'))
if is_logged(login_page):
self._logged_in = True
return
login_form = self._hidden_inputs(login_page)
login_form.update({
'username': username,
'password': password,
})
response = self._download_json(
'https://www.%s/front/authenticate' % host, None,
'Logging in to %s' % site,
data=urlencode_postdata(login_form),
headers={
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Referer': login_url,
'X-Requested-With': 'XMLHttpRequest',
})
if response.get('success') == '1':
self._logged_in = True
return
message = response.get('message')
if message is not None:
raise ExtractorError(
'Unable to login: %s' % message, expected=True)
raise ExtractorError('Unable to log in')
class PornHubIE(PornHubBaseIE):
IE_DESC = 'PornHub and Thumbzilla'
_VALID_URL = r'''(?x)
https?://
(?:
(?:[^/]+\.)?(?P<host>pornhub(?:premium)?\.(?:com|net|org))/(?:(?:view_video\.php|video/show)\?viewkey=|embed/)|
(?:www\.)?thumbzilla\.com/video/
)
(?P<id>[\da-z]+)
'''
_TESTS = [{
'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
'md5': 'a6391306d050e4547f62b3f485dd9ba9',
'info_dict': {
'id': '648719015',
'ext': 'mp4',
'title': 'Seductive Indian beauty strips down and fingers her pink pussy',
'uploader': 'Babes',
'upload_date': '20130628',
'timestamp': 1372447216,
'duration': 361,
'view_count': int,
'like_count': int,
'dislike_count': int,
'comment_count': int,
'age_limit': 18,
'tags': list,
'categories': list,
},
}, {
# non-ASCII title
'url': 'http://www.pornhub.com/view_video.php?viewkey=1331683002',
'info_dict': {
'id': '1331683002',
'ext': 'mp4',
'title': '重庆婷婷女王足交',
'upload_date': '20150213',
'timestamp': 1423804862,
'duration': 1753,
'view_count': int,
'like_count': int,
'dislike_count': int,
'comment_count': int,
'age_limit': 18,
'tags': list,
'categories': list,
},
'params': {
'skip_download': True,
},
}, {
# subtitles
'url': 'https://www.pornhub.com/view_video.php?viewkey=ph5af5fef7c2aa7',
'info_dict': {
'id': 'ph5af5fef7c2aa7',
'ext': 'mp4',
'title': 'BFFS - Cute Teen Girls Share Cock On the Floor',
'uploader': 'BFFs',
'duration': 622,
'view_count': int,
'like_count': int,
'dislike_count': int,
'comment_count': int,
'age_limit': 18,
'tags': list,
'categories': list,
'subtitles': {
'en': [{
"ext": 'srt'
}]
},
},
'params': {
'skip_download': True,
},
'skip': 'This video has been disabled',
}, {
'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d',
'only_matching': True,
}, {
# removed at the request of cam4.com
'url': 'http://fr.pornhub.com/view_video.php?viewkey=ph55ca2f9760862',
'only_matching': True,
}, {
# removed at the request of the copyright owner
'url': 'http://www.pornhub.com/view_video.php?viewkey=788152859',
'only_matching': True,
}, {
# removed by uploader
'url': 'http://www.pornhub.com/view_video.php?viewkey=ph572716d15a111',
'only_matching': True,
}, {
# private video
'url': 'http://www.pornhub.com/view_video.php?viewkey=ph56fd731fce6b7',
'only_matching': True,
}, {
'url': 'https://www.thumbzilla.com/video/ph56c6114abd99a/horny-girlfriend-sex',
'only_matching': True,
}, {
'url': 'http://www.pornhub.com/video/show?viewkey=648719015',
'only_matching': True,
}, {
'url': 'https://www.pornhub.net/view_video.php?viewkey=203640933',
'only_matching': True,
}, {
'url': 'https://www.pornhub.org/view_video.php?viewkey=203640933',
'only_matching': True,
}, {
'url': 'https://www.pornhubpremium.com/view_video.php?viewkey=ph5e4acdae54a82',
'only_matching': True,
}, {
# Some videos are available with the same id on both premium
# and non-premium sites (e.g. this and the following test)
'url': 'https://www.pornhub.com/view_video.php?viewkey=ph5f75b0f4b18e3',
'only_matching': True,
}, {
'url': 'https://www.pornhubpremium.com/view_video.php?viewkey=ph5f75b0f4b18e3',
'only_matching': True,
}]
@staticmethod
def _extract_urls(webpage):
return re.findall(
r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?pornhub(?:premium)?\.(?:com|net|org)/embed/[\da-z]+)',
webpage)
def _extract_count(self, pattern, webpage, name):
return str_to_int(self._search_regex(
pattern, webpage, '%s count' % name, fatal=False))
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
host = mobj.group('host') or 'pornhub.com'
video_id = mobj.group('id')
self._login(host)
self._set_cookie(host, 'age_verified', '1')
def dl_webpage(platform):
self._set_cookie(host, 'platform', platform)
return self._download_webpage(
'https://www.%s/view_video.php?viewkey=%s' % (host, video_id),
video_id, 'Downloading %s webpage' % platform)
webpage = dl_webpage('pc')
error_msg = self._html_search_regex(
r'(?s)<div[^>]+class=(["\'])(?:(?!\1).)*\b(?:removed|userMessageSection)\b(?:(?!\1).)*\1[^>]*>(?P<error>.+?)</div>',
webpage, 'error message', default=None, group='error')
if error_msg:
error_msg = re.sub(r'\s+', ' ', error_msg)
raise ExtractorError(
'PornHub said: %s' % error_msg,
expected=True, video_id=video_id)
# video_title from flashvars contains whitespace instead of non-ASCII (see
# http://www.pornhub.com/view_video.php?viewkey=1331683002), not relying
# on that anymore.
title = self._html_search_meta(
'twitter:title', webpage, default=None) or self._html_search_regex(
(r'(?s)<h1[^>]+class=["\']title["\'][^>]*>(?P<title>.+?)</h1>',
r'<div[^>]+data-video-title=(["\'])(?P<title>(?:(?!\1).)+)\1',
r'shareTitle["\']\s*[=:]\s*(["\'])(?P<title>(?:(?!\1).)+)\1'),
webpage, 'title', group='title')
video_urls = []
video_urls_set = set()
subtitles = {}
flashvars = self._parse_json(
self._search_regex(
r'var\s+flashvars_\d+\s*=\s*({.+?});', webpage, 'flashvars', default='{}'),
video_id)
if flashvars:
subtitle_url = url_or_none(flashvars.get('closedCaptionsFile'))
if subtitle_url:
subtitles.setdefault('en', []).append({
'url': subtitle_url,
'ext': 'srt',
})
thumbnail = flashvars.get('image_url')
duration = int_or_none(flashvars.get('video_duration'))
media_definitions = flashvars.get('mediaDefinitions')
if isinstance(media_definitions, list):
for definition in media_definitions:
if not isinstance(definition, dict):
continue
video_url = definition.get('videoUrl')
if not video_url or not isinstance(video_url, compat_str):
continue
if video_url in video_urls_set:
continue
video_urls_set.add(video_url)
video_urls.append(
(video_url, int_or_none(definition.get('quality'))))
else:
thumbnail, duration = [None] * 2
def extract_js_vars(webpage, pattern, default=NO_DEFAULT):
assignments = self._search_regex(
pattern, webpage, 'encoded url', default=default)
if not assignments:
return {}
assignments = assignments.split(';')
js_vars = {}
def parse_js_value(inp):
inp = re.sub(r'/\*(?:(?!\*/).)*?\*/', '', inp)
if '+' in inp:
inps = inp.split('+')
return functools.reduce(
operator.concat, map(parse_js_value, inps))
inp = inp.strip()
if inp in js_vars:
return js_vars[inp]
return remove_quotes(inp)
for assn in assignments:
assn = assn.strip()
if not assn:
continue
assn = re.sub(r'var\s+', '', assn)
vname, value = assn.split('=', 1)
js_vars[vname] = parse_js_value(value)
return js_vars
def add_video_url(video_url):
v_url = url_or_none(video_url)
if not v_url:
return
if v_url in video_urls_set:
return
video_urls.append((v_url, None))
video_urls_set.add(v_url)
def parse_quality_items(quality_items):
q_items = self._parse_json(quality_items, video_id, fatal=False)
if not isinstance(q_items, list):
return
for item in q_items:
if isinstance(item, dict):
add_video_url(item.get('url'))
if not video_urls:
FORMAT_PREFIXES = ('media', 'quality', 'qualityItems')
js_vars = extract_js_vars(
webpage, r'(var\s+(?:%s)_.+)' % '|'.join(FORMAT_PREFIXES),
default=None)
if js_vars:
for key, format_url in js_vars.items():
if key.startswith(FORMAT_PREFIXES[-1]):
parse_quality_items(format_url)
elif any(key.startswith(p) for p in FORMAT_PREFIXES[:2]):
add_video_url(format_url)
if not video_urls and re.search(
r'<[^>]+\bid=["\']lockedPlayer', webpage):
raise ExtractorError(
'Video %s is locked' % video_id, expected=True)
if not video_urls:
js_vars = extract_js_vars(
dl_webpage('tv'), r'(var.+?mediastring.+?)</script>')
add_video_url(js_vars['mediastring'])
for mobj in re.finditer(
r'<a[^>]+\bclass=["\']downloadBtn\b[^>]+\bhref=(["\'])(?P<url>(?:(?!\1).)+)\1',
webpage):
video_url = mobj.group('url')
if video_url not in video_urls_set:
video_urls.append((video_url, None))
video_urls_set.add(video_url)
upload_date = None
formats = []
for video_url, height in video_urls:
if not upload_date:
upload_date = self._search_regex(
                    r'/(\d{6}/\d{2})/', video_url, 'upload date', default=None)
if upload_date:
upload_date = upload_date.replace('/', '')
ext = determine_ext(video_url)
if ext == 'mpd':
formats.extend(self._extract_mpd_formats(
video_url, video_id, mpd_id='dash', fatal=False))
continue
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
video_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
continue
tbr = None
mobj = re.search(r'(?P<height>\d+)[pP]?_(?P<tbr>\d+)[kK]', video_url)
if mobj:
if not height:
height = int(mobj.group('height'))
tbr = int(mobj.group('tbr'))
formats.append({
'url': video_url,
'format_id': '%dp' % height if height else None,
'height': height,
'tbr': tbr,
})
self._sort_formats(formats)
video_uploader = self._html_search_regex(
r'(?s)From: .+?<(?:a\b[^>]+\bhref=["\']/(?:(?:user|channel)s|model|pornstar)/|span\b[^>]+\bclass=["\']username)[^>]+>(.+?)<',
webpage, 'uploader', default=None)
def extract_vote_count(kind, name):
return self._extract_count(
(r'<span[^>]+\bclass="votes%s"[^>]*>([\d,\.]+)</span>' % kind,
r'<span[^>]+\bclass=["\']votes%s["\'][^>]*\bdata-rating=["\'](\d+)' % kind),
webpage, name)
view_count = self._extract_count(
r'<span class="count">([\d,\.]+)</span> [Vv]iews', webpage, 'view')
like_count = extract_vote_count('Up', 'like')
dislike_count = extract_vote_count('Down', 'dislike')
comment_count = self._extract_count(
r'All Comments\s*<span>\(([\d,.]+)\)', webpage, 'comment')
def extract_list(meta_key):
div = self._search_regex(
r'(?s)<div[^>]+\bclass=["\'].*?\b%sWrapper[^>]*>(.+?)</div>'
% meta_key, webpage, meta_key, default=None)
if div:
return re.findall(r'<a[^>]+\bhref=[^>]+>([^<]+)', div)
info = self._search_json_ld(webpage, video_id, default={})
# description provided in JSON-LD is irrelevant
info['description'] = None
return merge_dicts({
'id': video_id,
'uploader': video_uploader,
'upload_date': upload_date,
'title': title,
'thumbnail': thumbnail,
'duration': duration,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'comment_count': comment_count,
'formats': formats,
'age_limit': 18,
'tags': extract_list('tags'),
'categories': extract_list('categories'),
'subtitles': subtitles,
}, info)
class PornHubPlaylistBaseIE(PornHubBaseIE):
def _extract_page(self, url):
return int_or_none(self._search_regex(
r'\bpage=(\d+)', url, 'page', default=None))
def _extract_entries(self, webpage, host):
        # Only process the container div with the main playlist content,
        # skipping the drop-down menu that uses a similar pattern for videos
        # (see https://github.com/ytdl-org/youtube-dl/issues/11594).
container = self._search_regex(
r'(?s)(<div[^>]+class=["\']container.+)', webpage,
'container', default=webpage)
return [
self.url_result(
'http://www.%s/%s' % (host, video_url),
PornHubIE.ie_key(), video_title=title)
for video_url, title in orderedSet(re.findall(
r'href="/?(view_video\.php\?.*\bviewkey=[\da-z]+[^"]*)"[^>]*\s+title="([^"]+)"',
container))
]
class PornHubUserIE(PornHubPlaylistBaseIE):
_VALID_URL = r'(?P<url>https?://(?:[^/]+\.)?(?P<host>pornhub(?:premium)?\.(?:com|net|org))/(?:(?:user|channel)s|model|pornstar)/(?P<id>[^/?#&]+))(?:[?#&]|/(?!videos)|$)'
_TESTS = [{
'url': 'https://www.pornhub.com/model/zoe_ph',
'playlist_mincount': 118,
}, {
'url': 'https://www.pornhub.com/pornstar/liz-vicious',
'info_dict': {
'id': 'liz-vicious',
},
'playlist_mincount': 118,
}, {
'url': 'https://www.pornhub.com/users/russianveet69',
'only_matching': True,
}, {
'url': 'https://www.pornhub.com/channels/povd',
'only_matching': True,
}, {
'url': 'https://www.pornhub.com/model/zoe_ph?abc=1',
'only_matching': True,
}, {
# Unavailable via /videos page, but available with direct pagination
# on pornstar page (see [1]), requires premium
# 1. https://github.com/ytdl-org/youtube-dl/issues/27853
'url': 'https://www.pornhubpremium.com/pornstar/sienna-west',
'only_matching': True,
}, {
# Same as before, multi page
'url': 'https://www.pornhubpremium.com/pornstar/lily-labeau',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
user_id = mobj.group('id')
videos_url = '%s/videos' % mobj.group('url')
page = self._extract_page(url)
if page:
videos_url = update_url_query(videos_url, {'page': page})
return self.url_result(
videos_url, ie=PornHubPagedVideoListIE.ie_key(), video_id=user_id)
class PornHubPagedPlaylistBaseIE(PornHubPlaylistBaseIE):
@staticmethod
def _has_more(webpage):
return re.search(
r'''(?x)
<li[^>]+\bclass=["\']page_next|
<link[^>]+\brel=["\']next|
<button[^>]+\bid=["\']moreDataBtn
''', webpage) is not None
def _entries(self, url, host, item_id):
page = self._extract_page(url)
VIDEOS = '/videos'
def download_page(base_url, num, fallback=False):
note = 'Downloading page %d%s' % (num, ' (switch to fallback)' if fallback else '')
return self._download_webpage(
base_url, item_id, note, query={'page': num})
def is_404(e):
return isinstance(e.cause, compat_HTTPError) and e.cause.code == 404
base_url = url
has_page = page is not None
first_page = page if has_page else 1
for page_num in (first_page, ) if has_page else itertools.count(first_page):
try:
try:
webpage = download_page(base_url, page_num)
except ExtractorError as e:
                    # Some sources may not be available via the /videos page,
                    # so try falling back to main page pagination (see [1])
                    # 1. https://github.com/ytdl-org/youtube-dl/issues/27853
if is_404(e) and page_num == first_page and VIDEOS in base_url:
base_url = base_url.replace(VIDEOS, '')
webpage = download_page(base_url, page_num, fallback=True)
else:
raise
except ExtractorError as e:
if is_404(e) and page_num != first_page:
break
raise
page_entries = self._extract_entries(webpage, host)
if not page_entries:
break
for e in page_entries:
yield e
if not self._has_more(webpage):
break
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
host = mobj.group('host')
item_id = mobj.group('id')
self._login(host)
return self.playlist_result(self._entries(url, host, item_id), item_id)
class PornHubPagedVideoListIE(PornHubPagedPlaylistBaseIE):
_VALID_URL = r'https?://(?:[^/]+\.)?(?P<host>pornhub(?:premium)?\.(?:com|net|org))/(?P<id>(?:[^/]+/)*[^/?#&]+)'
_TESTS = [{
'url': 'https://www.pornhub.com/model/zoe_ph/videos',
'only_matching': True,
}, {
'url': 'http://www.pornhub.com/users/rushandlia/videos',
'only_matching': True,
}, {
'url': 'https://www.pornhub.com/pornstar/jenny-blighe/videos',
'info_dict': {
'id': 'pornstar/jenny-blighe/videos',
},
'playlist_mincount': 149,
}, {
'url': 'https://www.pornhub.com/pornstar/jenny-blighe/videos?page=3',
'info_dict': {
'id': 'pornstar/jenny-blighe/videos',
},
'playlist_mincount': 40,
}, {
# default sorting as Top Rated Videos
'url': 'https://www.pornhub.com/channels/povd/videos',
'info_dict': {
'id': 'channels/povd/videos',
},
'playlist_mincount': 293,
}, {
# Top Rated Videos
'url': 'https://www.pornhub.com/channels/povd/videos?o=ra',
'only_matching': True,
}, {
# Most Recent Videos
'url': 'https://www.pornhub.com/channels/povd/videos?o=da',
'only_matching': True,
}, {
# Most Viewed Videos
'url': 'https://www.pornhub.com/channels/povd/videos?o=vi',
'only_matching': True,
}, {
'url': 'http://www.pornhub.com/users/zoe_ph/videos/public',
'only_matching': True,
}, {
# Most Viewed Videos
'url': 'https://www.pornhub.com/pornstar/liz-vicious/videos?o=mv',
'only_matching': True,
}, {
# Top Rated Videos
'url': 'https://www.pornhub.com/pornstar/liz-vicious/videos?o=tr',
'only_matching': True,
}, {
# Longest Videos
'url': 'https://www.pornhub.com/pornstar/liz-vicious/videos?o=lg',
'only_matching': True,
}, {
# Newest Videos
'url': 'https://www.pornhub.com/pornstar/liz-vicious/videos?o=cm',
'only_matching': True,
}, {
'url': 'https://www.pornhub.com/pornstar/liz-vicious/videos/paid',
'only_matching': True,
}, {
'url': 'https://www.pornhub.com/pornstar/liz-vicious/videos/fanonly',
'only_matching': True,
}, {
'url': 'https://www.pornhub.com/video',
'only_matching': True,
}, {
'url': 'https://www.pornhub.com/video?page=3',
'only_matching': True,
}, {
'url': 'https://www.pornhub.com/video/search?search=123',
'only_matching': True,
}, {
'url': 'https://www.pornhub.com/categories/teen',
'only_matching': True,
}, {
'url': 'https://www.pornhub.com/categories/teen?page=3',
'only_matching': True,
}, {
'url': 'https://www.pornhub.com/hd',
'only_matching': True,
}, {
'url': 'https://www.pornhub.com/hd?page=3',
'only_matching': True,
}, {
'url': 'https://www.pornhub.com/described-video',
'only_matching': True,
}, {
'url': 'https://www.pornhub.com/described-video?page=2',
'only_matching': True,
}, {
'url': 'https://www.pornhub.com/video/incategories/60fps-1/hd-porn',
'only_matching': True,
}, {
'url': 'https://www.pornhub.com/playlist/44121572',
'info_dict': {
'id': 'playlist/44121572',
},
'playlist_mincount': 132,
}, {
'url': 'https://www.pornhub.com/playlist/4667351',
'only_matching': True,
}, {
'url': 'https://de.pornhub.com/playlist/4667351',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
return (False
if PornHubIE.suitable(url) or PornHubUserIE.suitable(url) or PornHubUserVideosUploadIE.suitable(url)
else super(PornHubPagedVideoListIE, cls).suitable(url))
class PornHubUserVideosUploadIE(PornHubPagedPlaylistBaseIE):
_VALID_URL = r'(?P<url>https?://(?:[^/]+\.)?(?P<host>pornhub(?:premium)?\.(?:com|net|org))/(?:(?:user|channel)s|model|pornstar)/(?P<id>[^/]+)/videos/upload)'
_TESTS = [{
'url': 'https://www.pornhub.com/pornstar/jenny-blighe/videos/upload',
'info_dict': {
'id': 'jenny-blighe',
},
'playlist_mincount': 129,
}, {
'url': 'https://www.pornhub.com/model/zoe_ph/videos/upload',
'only_matching': True,
}]
|
unlicense
|
5c39d5adf81a651ccc1a8e8d870eefe2
| 35.916094
| 173
| 0.509278
| 3.570782
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/rice.py
|
90
|
4580
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_parse_qs
from ..utils import (
xpath_text,
xpath_element,
int_or_none,
parse_iso8601,
ExtractorError,
)
class RICEIE(InfoExtractor):
_VALID_URL = r'https?://mediahub\.rice\.edu/app/[Pp]ortal/video\.aspx\?(?P<query>.+)'
_TEST = {
'url': 'https://mediahub.rice.edu/app/Portal/video.aspx?PortalID=25ffd62c-3d01-4b29-8c70-7c94270efb3e&DestinationID=66bc9434-03bd-4725-b47e-c659d8d809db&ContentID=YEWIvbhb40aqdjMD1ALSqw',
'md5': '9b83b4a2eead4912dc3b7fac7c449b6a',
'info_dict': {
'id': 'YEWIvbhb40aqdjMD1ALSqw',
'ext': 'mp4',
'title': 'Active Learning in Archeology',
'upload_date': '20140616',
'timestamp': 1402926346,
}
}
_NS = 'http://schemas.datacontract.org/2004/07/ensembleVideo.Data.Service.Contracts.Models.Player.Config'
def _real_extract(self, url):
qs = compat_parse_qs(re.match(self._VALID_URL, url).group('query'))
if not qs.get('PortalID') or not qs.get('DestinationID') or not qs.get('ContentID'):
raise ExtractorError('Invalid URL', expected=True)
portal_id = qs['PortalID'][0]
playlist_id = qs['DestinationID'][0]
content_id = qs['ContentID'][0]
content_data = self._download_xml('https://mediahub.rice.edu/api/portal/GetContentTitle', content_id, query={
'portalId': portal_id,
'playlistId': playlist_id,
'contentId': content_id
})
metadata = xpath_element(content_data, './/metaData', fatal=True)
title = xpath_text(metadata, 'primaryTitle', fatal=True)
encodings = xpath_element(content_data, './/encodings', fatal=True)
player_data = self._download_xml('https://mediahub.rice.edu/api/player/GetPlayerConfig', content_id, query={
'temporaryLinkId': xpath_text(encodings, 'temporaryLinkId', fatal=True),
'contentId': content_id,
})
common_fmt = {}
dimensions = xpath_text(encodings, 'dimensions')
if dimensions:
wh = dimensions.split('x')
if len(wh) == 2:
common_fmt.update({
'width': int_or_none(wh[0]),
'height': int_or_none(wh[1]),
})
formats = []
rtsp_path = xpath_text(player_data, self._xpath_ns('RtspPath', self._NS))
if rtsp_path:
fmt = {
'url': rtsp_path,
'format_id': 'rtsp',
}
fmt.update(common_fmt)
formats.append(fmt)
for source in player_data.findall(self._xpath_ns('.//Source', self._NS)):
video_url = xpath_text(source, self._xpath_ns('File', self._NS))
if not video_url:
continue
if '.m3u8' in video_url:
formats.extend(self._extract_m3u8_formats(video_url, content_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
else:
fmt = {
'url': video_url,
'format_id': video_url.split(':')[0],
}
fmt.update(common_fmt)
rtmp = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>.+))/(?P<playpath>mp4:.+)$', video_url)
if rtmp:
fmt.update({
'url': rtmp.group('url'),
'play_path': rtmp.group('playpath'),
'app': rtmp.group('app'),
'ext': 'flv',
})
formats.append(fmt)
self._sort_formats(formats)
thumbnails = []
for content_asset in content_data.findall('.//contentAssets'):
asset_type = xpath_text(content_asset, 'type')
if asset_type == 'image':
image_url = xpath_text(content_asset, 'httpPath')
if not image_url:
continue
thumbnails.append({
'id': xpath_text(content_asset, 'ID'),
'url': image_url,
})
return {
'id': content_id,
'title': title,
'description': xpath_text(metadata, 'abstract'),
'duration': int_or_none(xpath_text(metadata, 'duration')),
'timestamp': parse_iso8601(xpath_text(metadata, 'dateUpdated')),
'thumbnails': thumbnails,
'formats': formats,
}
|
unlicense
|
0b6c3af7d48fbec20a47e2320f5e5dd5
| 38.482759
| 195
| 0.527948
| 3.643596
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/radiode.py
|
64
|
1820
|
from __future__ import unicode_literals
from .common import InfoExtractor
class RadioDeIE(InfoExtractor):
IE_NAME = 'radio.de'
_VALID_URL = r'https?://(?P<id>.+?)\.(?:radio\.(?:de|at|fr|pt|es|pl|it)|rad\.io)'
_TEST = {
'url': 'http://ndr2.radio.de/',
'info_dict': {
'id': 'ndr2',
'ext': 'mp3',
'title': 're:^NDR 2 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'md5:591c49c702db1a33751625ebfb67f273',
'thumbnail': r're:^https?://.*\.png',
'is_live': True,
},
'params': {
'skip_download': True,
}
}
def _real_extract(self, url):
radio_id = self._match_id(url)
webpage = self._download_webpage(url, radio_id)
jscode = self._search_regex(
r"'components/station/stationService':\s*\{\s*'?station'?:\s*(\{.*?\s*\}),\n",
webpage, 'broadcast')
broadcast = self._parse_json(jscode, radio_id)
title = self._live_title(broadcast['name'])
description = broadcast.get('description') or broadcast.get('shortDescription')
thumbnail = broadcast.get('picture4Url') or broadcast.get('picture4TransUrl') or broadcast.get('logo100x100')
formats = [{
'url': stream['streamUrl'],
'ext': stream['streamContentFormat'].lower(),
'acodec': stream['streamContentFormat'],
'abr': stream['bitRate'],
'asr': stream['sampleRate']
} for stream in broadcast['streamUrls']]
self._sort_formats(formats)
return {
'id': radio_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'is_live': True,
'formats': formats,
}
|
unlicense
|
ccfd3c6a1cf2524ee41e2da80343bec2
| 34
| 117
| 0.520879
| 3.575639
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/turner.py
|
5
|
11115
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .adobepass import AdobePassIE
from ..compat import compat_str
from ..utils import (
fix_xml_ampersands,
xpath_text,
int_or_none,
determine_ext,
float_or_none,
parse_duration,
xpath_attr,
update_url_query,
ExtractorError,
strip_or_none,
url_or_none,
)
class TurnerBaseIE(AdobePassIE):
_AKAMAI_SPE_TOKEN_CACHE = {}
def _extract_timestamp(self, video_data):
return int_or_none(xpath_attr(video_data, 'dateCreated', 'uts'))
def _add_akamai_spe_token(self, tokenizer_src, video_url, content_id, ap_data, custom_tokenizer_query=None):
secure_path = self._search_regex(r'https?://[^/]+(.+/)', video_url, 'secure path') + '*'
token = self._AKAMAI_SPE_TOKEN_CACHE.get(secure_path)
if not token:
query = {
'path': secure_path,
}
if custom_tokenizer_query:
query.update(custom_tokenizer_query)
else:
query['videoId'] = content_id
if ap_data.get('auth_required'):
query['accessToken'] = self._extract_mvpd_auth(ap_data['url'], content_id, ap_data['site_name'], ap_data['site_name'])
auth = self._download_xml(
tokenizer_src, content_id, query=query)
error_msg = xpath_text(auth, 'error/msg')
if error_msg:
raise ExtractorError(error_msg, expected=True)
token = xpath_text(auth, 'token')
if not token:
return video_url
self._AKAMAI_SPE_TOKEN_CACHE[secure_path] = token
return video_url + '?hdnea=' + token
def _extract_cvp_info(self, data_src, video_id, path_data={}, ap_data={}, fatal=False):
video_data = self._download_xml(
data_src, video_id,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=fatal)
if not video_data:
return {}
video_id = video_data.attrib['id']
title = xpath_text(video_data, 'headline', fatal=True)
content_id = xpath_text(video_data, 'contentId') or video_id
# rtmp_src = xpath_text(video_data, 'akamai/src')
# if rtmp_src:
# split_rtmp_src = rtmp_src.split(',')
# if len(split_rtmp_src) == 2:
# rtmp_src = split_rtmp_src[1]
# aifp = xpath_text(video_data, 'akamai/aifp', default='')
urls = []
formats = []
thumbnails = []
subtitles = {}
rex = re.compile(
r'(?P<width>[0-9]+)x(?P<height>[0-9]+)(?:_(?P<bitrate>[0-9]+))?')
        # Possible format locations: files/file, files/groupFiles/files
        # and maybe others
for video_file in video_data.findall('.//file'):
video_url = url_or_none(video_file.text.strip())
if not video_url:
continue
ext = determine_ext(video_url)
if video_url.startswith('/mp4:protected/'):
continue
# TODO Correct extraction for these files
# protected_path_data = path_data.get('protected')
# if not protected_path_data or not rtmp_src:
# continue
# protected_path = self._search_regex(
# r'/mp4:(.+)\.[a-z0-9]', video_url, 'secure path')
# auth = self._download_webpage(
# protected_path_data['tokenizer_src'], query={
# 'path': protected_path,
# 'videoId': content_id,
# 'aifp': aifp,
# })
# token = xpath_text(auth, 'token')
# if not token:
# continue
# video_url = rtmp_src + video_url + '?' + token
elif video_url.startswith('/secure/'):
secure_path_data = path_data.get('secure')
if not secure_path_data:
continue
video_url = self._add_akamai_spe_token(
secure_path_data['tokenizer_src'],
secure_path_data['media_src'] + video_url,
content_id, ap_data)
elif not re.match('https?://', video_url):
base_path_data = path_data.get(ext, path_data.get('default', {}))
media_src = base_path_data.get('media_src')
if not media_src:
continue
video_url = media_src + video_url
if video_url in urls:
continue
urls.append(video_url)
format_id = video_file.get('bitrate')
if ext in ('scc', 'srt', 'vtt'):
subtitles.setdefault('en', []).append({
'ext': ext,
'url': video_url,
})
elif ext == 'png':
thumbnails.append({
'id': format_id,
'url': video_url,
})
elif ext == 'smil':
formats.extend(self._extract_smil_formats(
video_url, video_id, fatal=False))
elif re.match(r'https?://[^/]+\.akamaihd\.net/[iz]/', video_url):
formats.extend(self._extract_akamai_formats(
video_url, video_id, {
'hds': path_data.get('f4m', {}).get('host'),
# nba.cdn.turner.com, ht.cdn.turner.com, ht2.cdn.turner.com
# ht3.cdn.turner.com, i.cdn.turner.com, s.cdn.turner.com
# ssl.cdn.turner.com
'http': 'pmd.cdn.turner.com',
}))
elif ext == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
video_url, video_id, 'mp4',
m3u8_id=format_id or 'hls', fatal=False)
if '/secure/' in video_url and '?hdnea=' in video_url:
for f in m3u8_formats:
f['_seekable'] = False
formats.extend(m3u8_formats)
elif ext == 'f4m':
formats.extend(self._extract_f4m_formats(
update_url_query(video_url, {'hdcore': '3.7.0'}),
video_id, f4m_id=format_id or 'hds', fatal=False))
else:
f = {
'format_id': format_id,
'url': video_url,
'ext': ext,
}
mobj = rex.search(video_url)
if mobj:
f.update({
'width': int(mobj.group('width')),
'height': int(mobj.group('height')),
'tbr': int_or_none(mobj.group('bitrate')),
})
elif isinstance(format_id, compat_str):
if format_id.isdigit():
f['tbr'] = int(format_id)
else:
mobj = re.match(r'ios_(audio|[0-9]+)$', format_id)
if mobj:
if mobj.group(1) == 'audio':
f.update({
'vcodec': 'none',
'ext': 'm4a',
})
else:
f['tbr'] = int(mobj.group(1))
formats.append(f)
self._sort_formats(formats)
for source in video_data.findall('closedCaptions/source'):
for track in source.findall('track'):
track_url = url_or_none(track.get('url'))
if not track_url or track_url.endswith('/big'):
continue
lang = track.get('lang') or track.get('label') or 'en'
subtitles.setdefault(lang, []).append({
'url': track_url,
'ext': {
'scc': 'scc',
'webvtt': 'vtt',
'smptett': 'tt',
}.get(source.get('format'))
})
thumbnails.extend({
'id': image.get('cut') or image.get('name'),
'url': image.text,
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
} for image in video_data.findall('images/image'))
is_live = xpath_text(video_data, 'isLive') == 'true'
return {
'id': video_id,
'title': self._live_title(title) if is_live else title,
'formats': formats,
'subtitles': subtitles,
'thumbnails': thumbnails,
'thumbnail': xpath_text(video_data, 'poster'),
'description': strip_or_none(xpath_text(video_data, 'description')),
'duration': parse_duration(xpath_text(video_data, 'length') or xpath_text(video_data, 'trt')),
'timestamp': self._extract_timestamp(video_data),
'upload_date': xpath_attr(video_data, 'metas', 'version'),
'series': xpath_text(video_data, 'showTitle'),
'season_number': int_or_none(xpath_text(video_data, 'seasonNumber')),
'episode_number': int_or_none(xpath_text(video_data, 'episodeNumber')),
'is_live': is_live,
}
def _extract_ngtv_info(self, media_id, tokenizer_query, ap_data=None):
streams_data = self._download_json(
'http://medium.ngtv.io/media/%s/tv' % media_id,
media_id)['media']['tv']
duration = None
chapters = []
formats = []
for supported_type in ('unprotected', 'bulkaes'):
stream_data = streams_data.get(supported_type, {})
m3u8_url = stream_data.get('secureUrl') or stream_data.get('url')
if not m3u8_url:
continue
if stream_data.get('playlistProtection') == 'spe':
m3u8_url = self._add_akamai_spe_token(
'http://token.ngtv.io/token/token_spe',
m3u8_url, media_id, ap_data or {}, tokenizer_query)
formats.extend(self._extract_m3u8_formats(
m3u8_url, media_id, 'mp4', m3u8_id='hls', fatal=False))
duration = float_or_none(stream_data.get('totalRuntime'))
if not chapters:
for chapter in stream_data.get('contentSegments', []):
start_time = float_or_none(chapter.get('start'))
chapter_duration = float_or_none(chapter.get('duration'))
if start_time is None or chapter_duration is None:
continue
chapters.append({
'start_time': start_time,
'end_time': start_time + chapter_duration,
})
self._sort_formats(formats)
return {
'formats': formats,
'chapters': chapters,
'duration': duration,
}
|
unlicense
|
85595fdd6b1736a1a4fda8872d7f4593
| 41.75
| 134
| 0.474854
| 3.938696
| false
| false
| false
| false
|
unitedstates/congress-legislators
|
scripts/alternate_bulk_formats.py
|
1
|
5823
|
import csv
import json
import glob
import os
import utils
def generate_csv():
#yaml filenames
yamls = ["legislators-current.yaml","legislators-historical.yaml"]
yaml_social = "legislators-social-media.yaml"
#list of (yaml field name, csv column name) tuples, split into categories that do not mirror the yaml structure (they are grouped for logical csv column ordering)
bio_fields = [
("last", "last_name"),
("first", "first_name"),
("middle", "middle_name"),
("suffix", "suffix"),
("nickname", "nickname"),
("official_full", "full_name"),
("birthday", "birthday"),
("gender", "gender")
]
#ID crosswalks; FEC IDs may contain multiple values, so they are comma-joined when the row is written
crosswalk_fields = [
("bioguide", "bioguide_id"),
("thomas", "thomas_id"),
("opensecrets", "opensecrets_id"),
("lis","lis_id"),
("fec","fec_ids"),
("cspan", "cspan_id"),
("govtrack", "govtrack_id"),
("votesmart", "votesmart_id"),
("ballotpedia", "ballotpedia_id"),
("washington_post", "washington_post_id"),
("icpsr", "icpsr_id"),
("wikipedia", "wikipedia_id")
]
#separate list for children of "terms", csv only captures data for most recent term
#currently excluding start/end dates: earliest-start-to-latest-end is misleading (it hides gaps in service), as is the start/end of only the most recent term
term_fields = [
("type", "type"),
("state", "state"),
("district", "district"),
("class", "senate_class"),
("party", "party"),
("url", "url"),
("address", "address"),
("phone", "phone"),
("contact_form", "contact_form"),
("rss_url", "rss_url"),
]
#pulled from legislators-social-media.yaml
social_media_fields = [
("twitter", "twitter"),
("facebook", "facebook"),
("youtube", "youtube"),
("youtube_id", "youtube_id")
]
print("Loading %s..." %yaml_social)
social = utils.load_data(yaml_social)
for filename in yamls:
print("Converting %s to CSV..." % filename)
legislators = utils.load_data(filename)
#convert yaml to csv
csv_output = csv.writer(open("../" + filename.replace(".yaml", ".csv"),"w"))
head = []
for pair in bio_fields:
head.append(pair[1])
for pair in term_fields:
head.append(pair[1])
for pair in social_media_fields:
head.append(pair[1])
for pair in crosswalk_fields:
head.append(pair[1])
csv_output.writerow(head)
for legislator in legislators:
legislator_row = []
for pair in bio_fields:
if 'name' in legislator and pair[0] in legislator['name']:
legislator_row.append(legislator['name'][pair[0]])
elif 'bio' in legislator and pair[0] in legislator['bio']:
legislator_row.append(legislator['bio'][pair[0]])
else:
legislator_row.append(None)
for pair in term_fields:
latest_term = legislator['terms'][-1]
if pair[0] in latest_term:
legislator_row.append(latest_term[pair[0]])
else:
legislator_row.append(None)
social_match = None
for social_legislator in social:
if 'bioguide' in legislator['id'] and 'bioguide' in social_legislator['id'] and legislator['id']['bioguide'] == social_legislator['id']['bioguide']:
social_match = social_legislator
break
elif 'thomas' in legislator['id'] and 'thomas' in social_legislator['id'] and legislator['id']['thomas'] == social_legislator['id']['thomas']:
social_match = social_legislator
break
elif 'govtrack' in legislator['id'] and 'govtrack' in social_legislator['id'] and legislator['id']['govtrack'] == social_legislator['id']['govtrack']:
social_match = social_legislator
break
for pair in social_media_fields:
if social_match is not None:
if pair[0] in social_match['social']:
legislator_row.append(social_match['social'][pair[0]])
else:
legislator_row.append(None)
else:
legislator_row.append(None)
for pair in crosswalk_fields:
if pair[0] in legislator['id']:
value = legislator['id'][pair[0]]
if isinstance(value, list):
# make FEC IDs comma-separated
value = ",".join(value)
legislator_row.append(value)
else:
legislator_row.append(None)
csv_output.writerow(legislator_row)
generate_district_office_csv()
def generate_district_office_csv():
filename = "legislators-district-offices.yaml"
print("Converting %s to CSV..." % filename)
legislators_offices = utils.load_data(filename)
fields = [
"bioguide", "thomas", "govtrack", "id", "address", "building",
"city", "fax", "hours", "phone", "state", "suite", "zip",
"latitude", "longitude"]
f = open("../" + filename.replace(".yaml", ".csv"), "w")
csv_output = csv.DictWriter(f, fieldnames=fields)
csv_output.writeheader()
for legislator_offices in legislators_offices:
legislator_ids = legislator_offices['id']
for office in legislator_offices['offices']:
office.update(legislator_ids)
csv_output.writerow(office)
def generate_json():
#yaml filenames
yamls = list(map(os.path.basename, glob.glob("../*.yaml")))
for filename in yamls:
print("Converting %s to JSON..." % filename)
data = utils.load_data(filename)
'''handle edge case of incorrect coercion for twitter ids in social media data
javascript json consumers store numbers as 64-bit floats, which only represent
integers exactly up to 53 bits, so 64-bit integer twitter ids *must* be stringified
to consistently preserve their value in json; otherwise they may be rounded and malformed
'''
if 'legislators-social-media' in filename:
for social_legislator in data:
if 'twitter_id' in social_legislator['social']:
social_legislator['social']['twitter_id'] = str(social_legislator['social']['twitter_id'])
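# (illustration, not part of the original script: javascript stores numbers as
# doubles, so an id like 9007199254740993 (2**53 + 1) would be read back as
# 9007199254740992 unless it is serialized as a string)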
#convert yaml to json
utils.write(
json.dumps(data, default=utils.format_datetime, indent=2),
"../" + filename.replace(".yaml", ".json"))
if __name__ == '__main__':
generate_csv()
generate_json()
|
cc0-1.0
|
fe6ad2e3804aec843ef2cfce056c03ec
| 29.973404
| 154
| 0.656363
| 3.007748
| false
| false
| false
| false
|
awslabs/aws-config-rules
|
python/IAM_USER_MATCHES_REGEX_PATTERN/IAM_USER_MATCHES_REGEX_PATTERN_test.py
|
1
|
9892
|
'''
This file made available under CC0 1.0 Universal (https://creativecommons.org/publicdomain/zero/1.0/legalcode)
Created with the Rule Development Kit: https://github.com/awslabs/aws-config-rdk
Can be used stand-alone or with the Rule Compliance Engine: https://github.com/awslabs/aws-config-engine-for-compliance-as-code
'''
import sys
import json
import unittest
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
##############
# Parameters #
##############
# Define the default resource to report to Config Rules
DEFAULT_RESOURCE_TYPE = 'AWS::IAM::User'
#############
# Main Code #
#############
CONFIG_CLIENT_MOCK = MagicMock()
STS_CLIENT_MOCK = MagicMock()
IAM_CLIENT_MOCK = MagicMock()
class Boto3Mock():
@staticmethod
def client(client_name, *args, **kwargs):
if client_name == 'config':
return CONFIG_CLIENT_MOCK
raise Exception("Attempting to create an unknown client")
sys.modules['boto3'] = Boto3Mock()
RULE = __import__('IAM_USER_MATCHES_REGEX_PATTERN')
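# Note: installing Boto3Mock into sys.modules before the __import__ above means
# the rule module's own "import boto3" resolves to the mock, so any
# boto3.client('config') call inside the rule returns CONFIG_CLIENT_MOCK.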
class ComplianceTest(unittest.TestCase):
user_list = {'Users': [
{'UserId': 'AIDAIDFOUX2OSRO6DO7XA',
'UserName': 'user-name-0'},
{'UserId': 'AIDAIDFOUX2OSRO6DO7XB',
'UserName': 'user-name-admin-1'},
{'UserId': 'AIDAIDFOUX2OSRO6DO7XF',
'UserName': 'Admin-user-badexpress-2'},
{'UserId': 'AIDAIDFOUX2OSRO6DO7XG',
'UserName': 'Admin-user-no-pattern-3'}]}
def test_scenario_1_no_pattern(self):
"""Test scenario to test when pattern is not specified
Keyword arguments:
self -- class ComplianceTest
"""
IAM_CLIENT_MOCK.list_users = MagicMock(return_value=self.user_list)
rule_param = {}
invoking_event = construct_invoking_event(construct_config_item(self.user_list['Users'][3]['UserName']))
lambda_event = build_lambda_configurationchange_event(invoking_event, rule_parameters=rule_param)
response = RULE.lambda_handler(lambda_event, {})
print(response)
assert_customer_error_response(self, response, "InvalidParameterValueException")
def test_scenario_2_invalid_regex(self):
"""Test scenario to test an invaild regex pattern
Keyword arguments:
self -- class ComplianceTest
"""
IAM_CLIENT_MOCK.list_users = MagicMock(return_value=self.user_list)
rule_param = {"regexPattern":"[bad"}
invoking_event = construct_invoking_event(construct_config_item(self.user_list['Users'][2]['UserName']))
lambda_event = build_lambda_configurationchange_event(invoking_event, rule_parameters=rule_param)
response = RULE.lambda_handler(lambda_event, {})
print(response)
assert_customer_error_response(self, response, "InvalidParameterValueException")
def test_scenario_3_non_compliant(self):
"""Test scenario to test non-compliant user name
Keyword arguments:
self -- class ComplianceTest
"""
IAM_CLIENT_MOCK.list_users = MagicMock(return_value=self.user_list)
rule_param = {"regexPattern":"admin"}
invoking_event = construct_invoking_event(construct_config_item(self.user_list['Users'][0]['UserName']))
lambda_event = build_lambda_configurationchange_event(invoking_event, rule_parameters=rule_param)
response = RULE.lambda_handler(lambda_event, {})
print(response)
resp_expected = []
resp_expected.append(build_expected_response('NON_COMPLIANT', self.user_list['Users'][0]['UserName'], annotation='The regex (admin) does not match (user-name-0).'))
assert_successful_evaluation(self, response, resp_expected)
def test_scenario_4_compliant(self):
"""Test scenario to test compliant user name
Keyword arguments:
self -- class ComplianceTest
"""
IAM_CLIENT_MOCK.list_users = MagicMock(return_value=self.user_list)
rule_param = {"regexPattern":".*admin.*"}
invoking_event = construct_invoking_event(construct_config_item(self.user_list['Users'][1]['UserName']))
lambda_event = build_lambda_configurationchange_event(invoking_event, rule_parameters=rule_param)
response = RULE.lambda_handler(lambda_event, {})
print(response)
resp_expected = []
resp_expected.append(build_expected_response('COMPLIANT', self.user_list['Users'][1]['UserName']))
assert_successful_evaluation(self, response, resp_expected)
####################
# Helper Functions #
####################
def construct_config_item(resource_name):
config_item = {
'relatedEvents': [],
'relationships': [],
'configuration': None,
'configurationItemVersion': None,
'configurationItemCaptureTime': "2019-03-17T03:37:52.418Z",
'supplementaryConfiguration': {},
'configurationStateId': 1532049940079,
'awsAccountId': "SAMPLE",
'configurationItemStatus': "ResourceDiscovered",
'resourceType': "AWS::IAM::User",
'resourceId': "AIDAILEDWOGIPJFAKOJKW",
'resourceName': resource_name,
'ARN': "arn:aws:iam::264683526309:user/{}".format(resource_name),
'awsRegion': "ap-south-1",
'configurationStateMd5Hash': "",
'resourceCreationTime': "2019-03-17T06:27:28.289Z",
'tags': {}
}
return config_item
def construct_invoking_event(config_item):
invoking_event = {
"configurationItemDiff": None,
"configurationItem": config_item,
"notificationCreationTime": "SAMPLE",
"messageType": "ConfigurationItemChangeNotification",
"recordVersion": "SAMPLE"
}
return invoking_event
def build_lambda_configurationchange_event(invoking_event, rule_parameters=None):
event_to_return = {
'configRuleName':'myrule',
'executionRoleArn':'roleArn',
'eventLeftScope': False,
'invokingEvent': json.dumps(invoking_event),
'accountId': '123456789012',
'configRuleArn': 'arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan',
'resultToken':'token'
}
if rule_parameters:
event_to_return['ruleParameters'] = json.dumps(rule_parameters)
return event_to_return
def build_lambda_scheduled_event(rule_parameters=None):
invoking_event = '{"messageType":"ScheduledNotification","notificationCreationTime":"2017-12-23T22:11:18.158Z"}'
event_to_return = {
'configRuleName':'myrule',
'executionRoleArn':'roleArn',
'eventLeftScope': False,
'invokingEvent': invoking_event,
'accountId': '123456789012',
'configRuleArn': 'arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan',
'resultToken':'token'
}
if rule_parameters:
event_to_return['ruleParameters'] = rule_parameters
return event_to_return
def build_expected_response(compliance_type, compliance_resource_id, compliance_resource_type=DEFAULT_RESOURCE_TYPE, annotation=None):
if not annotation:
return {
'ComplianceType': compliance_type,
'ComplianceResourceId': compliance_resource_id,
'ComplianceResourceType': compliance_resource_type
}
return {
'ComplianceType': compliance_type,
'ComplianceResourceId': compliance_resource_id,
'ComplianceResourceType': compliance_resource_type,
'Annotation': annotation
}
def assert_successful_evaluation(test_class, response, resp_expected, evaluations_count=1):
if isinstance(response, dict):
test_class.assertEquals(resp_expected['ComplianceResourceType'], response['ComplianceResourceType'])
test_class.assertEquals(resp_expected['ComplianceResourceId'], response['ComplianceResourceId'])
test_class.assertEquals(resp_expected['ComplianceType'], response['ComplianceType'])
test_class.assertTrue(response['OrderingTimestamp'])
if 'Annotation' in resp_expected or 'Annotation' in response:
test_class.assertEquals(resp_expected['Annotation'], response['Annotation'])
elif isinstance(response, list):
test_class.assertEquals(evaluations_count, len(response))
for i, response_expected in enumerate(resp_expected):
test_class.assertEquals(response_expected['ComplianceResourceType'], response[i]['ComplianceResourceType'])
test_class.assertEquals(response_expected['ComplianceResourceId'], response[i]['ComplianceResourceId'])
test_class.assertEquals(response_expected['ComplianceType'], response[i]['ComplianceType'])
test_class.assertTrue(response[i]['OrderingTimestamp'])
if 'Annotation' in response_expected or 'Annotation' in response[i]:
test_class.assertEquals(response_expected['Annotation'], response[i]['Annotation'])
def assert_customer_error_response(test_class, response, customer_error_code=None, customer_error_message=None):
if customer_error_code:
test_class.assertEqual(customer_error_code, response['customerErrorCode'])
if customer_error_message:
test_class.assertEqual(customer_error_message, response['customerErrorMessage'])
test_class.assertTrue(response['customerErrorCode'])
test_class.assertTrue(response['customerErrorMessage'])
if "internalErrorMessage" in response:
test_class.assertTrue(response['internalErrorMessage'])
if "internalErrorDetails" in response:
test_class.assertTrue(response['internalErrorDetails'])
def sts_mock():
assume_role_response = {
"Credentials": {
"AccessKeyId": "string",
"SecretAccessKey": "string",
"SessionToken": "string"}}
STS_CLIENT_MOCK.reset_mock(return_value=True)
STS_CLIENT_MOCK.assume_role = MagicMock(return_value=assume_role_response)
|
cc0-1.0
|
098eca1f8b5eada9e9985b8c4f6c00b0
| 42.964444
| 172
| 0.671452
| 3.908337
| false
| true
| false
| false
|
awslabs/aws-config-rules
|
python/VPC_VPN_2_TUNNELS_UP/VPC_VPN_2_TUNNELS_UP_test.py
|
1
|
9093
|
# Copyright 2017-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
import sys
import unittest
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
import botocore
##############
# Parameters #
##############
# Define the default resource to report to Config Rules
DEFAULT_RESOURCE_TYPE = 'AWS::EC2::VPNConnection'
#############
# Main Code #
#############
CONFIG_CLIENT_MOCK = MagicMock()
STS_CLIENT_MOCK = MagicMock()
class Boto3Mock():
@staticmethod
def client(client_name, *args, **kwargs):
if client_name == 'config':
return CONFIG_CLIENT_MOCK
if client_name == 'sts':
return STS_CLIENT_MOCK
raise Exception("Attempting to create an unknown client")
sys.modules['boto3'] = Boto3Mock()
RULE = __import__('VPC_VPN_2_TUNNELS_UP')
class ComplianceTest(unittest.TestCase):
#Gherkin scenario 1: At least one tunnel down.
def test_scenario_1_one_tunnel_is_down(self):
invoking_event_vpn = '{ \
"messageType": "ConfigurationItemChangeNotification", \
"configurationItem": { \
"configurationItemStatus": "OK", \
"resourceType": "AWS::EC2::VPNConnection", \
"resourceId": "some-resource-id", \
"configurationItemCaptureTime": "2019-04-18T08:49:09.878Z", \
"configuration": { \
"vgwTelemetry": [{ \
"status": "DOWN", \
"statusMessage": "IPSEC IS DOWN" \
}, { \
"status": "UP", \
"statusMessage": "IPSEC IS UP" \
}] \
} \
} \
}'
response = RULE.lambda_handler(build_lambda_configurationchange_event(invoking_event_vpn), {})
resp_expected = []
resp_expected.append(build_expected_response('NON_COMPLIANT', 'some-resource-id', annotation="This AWS VPN Connection has at least one VPN tunnel down with statusMessage: IPSEC IS DOWN"))
assert_successful_evaluation(self, response, resp_expected)
#Gherkin scenario 2: Both tunnels are up.
def test_scenario_2_both_tunnels_are_up(self):
invoking_event_vpn = '{ \
"messageType": "ConfigurationItemChangeNotification", \
"configurationItem": { \
"configurationItemStatus": "OK", \
"resourceType": "AWS::EC2::VPNConnection", \
"resourceId": "some-resource-id", \
"configurationItemCaptureTime": "2019-04-18T08:49:09.878Z", \
"configuration": { \
"vgwTelemetry": [{ \
"status": "UP", \
"statusMessage": "IPSEC IS UP" \
}, { \
"status": "UP", \
"statusMessage": "IPSEC IS UP" \
}] \
} \
} \
}'
response = RULE.lambda_handler(build_lambda_configurationchange_event(invoking_event_vpn), {})
resp_expected = []
resp_expected.append(build_expected_response('COMPLIANT', 'some-resource-id'))
assert_successful_evaluation(self, response, resp_expected)
####################
# Helper Functions #
####################
def build_lambda_configurationchange_event(invoking_event, rule_parameters=None):
event_to_return = {
'configRuleName':'myrule',
'executionRoleArn':'roleArn',
'eventLeftScope': False,
'invokingEvent': invoking_event,
'accountId': '123456789012',
'configRuleArn': 'arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan',
'resultToken':'token'
}
if rule_parameters:
event_to_return['ruleParameters'] = rule_parameters
return event_to_return
def build_lambda_scheduled_event(rule_parameters=None):
invoking_event = '{"messageType":"ScheduledNotification","notificationCreationTime":"2017-12-23T22:11:18.158Z"}'
event_to_return = {
'configRuleName':'myrule',
'executionRoleArn':'roleArn',
'eventLeftScope': False,
'invokingEvent': invoking_event,
'accountId': '123456789012',
'configRuleArn': 'arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan',
'resultToken':'token'
}
if rule_parameters:
event_to_return['ruleParameters'] = rule_parameters
return event_to_return
def build_expected_response(compliance_type, compliance_resource_id, compliance_resource_type=DEFAULT_RESOURCE_TYPE, annotation=None):
if not annotation:
return {
'ComplianceType': compliance_type,
'ComplianceResourceId': compliance_resource_id,
'ComplianceResourceType': compliance_resource_type
}
return {
'ComplianceType': compliance_type,
'ComplianceResourceId': compliance_resource_id,
'ComplianceResourceType': compliance_resource_type,
'Annotation': annotation
}
def assert_successful_evaluation(test_class, response, resp_expected, evaluations_count=1):
if isinstance(response, dict):
test_class.assertEquals(resp_expected['ComplianceResourceType'], response['ComplianceResourceType'])
test_class.assertEquals(resp_expected['ComplianceResourceId'], response['ComplianceResourceId'])
test_class.assertEquals(resp_expected['ComplianceType'], response['ComplianceType'])
test_class.assertTrue(response['OrderingTimestamp'])
if 'Annotation' in resp_expected or 'Annotation' in response:
test_class.assertEquals(resp_expected['Annotation'], response['Annotation'])
elif isinstance(response, list):
test_class.assertEquals(evaluations_count, len(response))
for i, response_expected in enumerate(resp_expected):
test_class.assertEquals(response_expected['ComplianceResourceType'], response[i]['ComplianceResourceType'])
test_class.assertEquals(response_expected['ComplianceResourceId'], response[i]['ComplianceResourceId'])
test_class.assertEquals(response_expected['ComplianceType'], response[i]['ComplianceType'])
test_class.assertTrue(response[i]['OrderingTimestamp'])
if 'Annotation' in response_expected or 'Annotation' in response[i]:
test_class.assertEquals(response_expected['Annotation'], response[i]['Annotation'])
def assert_customer_error_response(test_class, response, customer_error_code=None, customer_error_message=None):
if customer_error_code:
test_class.assertEqual(customer_error_code, response['customerErrorCode'])
if customer_error_message:
test_class.assertEqual(customer_error_message, response['customerErrorMessage'])
test_class.assertTrue(response['customerErrorCode'])
test_class.assertTrue(response['customerErrorMessage'])
if "internalErrorMessage" in response:
test_class.assertTrue(response['internalErrorMessage'])
if "internalErrorDetails" in response:
test_class.assertTrue(response['internalErrorDetails'])
def sts_mock():
assume_role_response = {
"Credentials": {
"AccessKeyId": "string",
"SecretAccessKey": "string",
"SessionToken": "string"}}
STS_CLIENT_MOCK.reset_mock(return_value=True)
STS_CLIENT_MOCK.assume_role = MagicMock(return_value=assume_role_response)
##################
# Common Testing #
##################
class TestStsErrors(unittest.TestCase):
def test_sts_unknown_error(self):
RULE.ASSUME_ROLE_MODE = True
RULE.evaluate_parameters = MagicMock(return_value=True)
STS_CLIENT_MOCK.assume_role = MagicMock(side_effect=botocore.exceptions.ClientError(
{'Error': {'Code': 'unknown-code', 'Message': 'unknown-message'}}, 'operation'))
response = RULE.lambda_handler(build_lambda_configurationchange_event('{}'), {})
assert_customer_error_response(
self, response, 'InternalError', 'InternalError')
def test_sts_access_denied(self):
RULE.ASSUME_ROLE_MODE = True
RULE.evaluate_parameters = MagicMock(return_value=True)
STS_CLIENT_MOCK.assume_role = MagicMock(side_effect=botocore.exceptions.ClientError(
{'Error': {'Code': 'AccessDenied', 'Message': 'access-denied'}}, 'operation'))
response = RULE.lambda_handler(build_lambda_configurationchange_event('{}'), {})
assert_customer_error_response(
self, response, 'AccessDenied', 'AWS Config does not have permission to assume the IAM role.')
|
cc0-1.0
|
843d943bc2ea484d6cd6c1273d32ef9b
| 42.927536
| 195
| 0.639393
| 4.052139
| false
| true
| false
| false
|
awslabs/aws-config-rules
|
python/LAMBDA_DLQ_CHECK/LAMBDA_DLQ_CHECK_test.py
|
1
|
9631
|
import sys
import unittest
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
import json
import botocore
##############
# Parameters #
##############
# Define the default resource to report to Config Rules
DEFAULT_RESOURCE_TYPE = 'AWS::Lambda::Function'
#############
# Main Code #
#############
CONFIG_CLIENT_MOCK = MagicMock()
STS_CLIENT_MOCK = MagicMock()
class Boto3Mock():
@staticmethod
def client(client_name, *args, **kwargs):
if client_name == 'config':
return CONFIG_CLIENT_MOCK
if client_name == 'sts':
return STS_CLIENT_MOCK
raise Exception("Attempting to create an unknown client")
sys.modules['boto3'] = Boto3Mock()
RULE = __import__('LAMBDA_DLQ_CHECK')
class SampleTest(unittest.TestCase):
rule_invalid_parameter = '{"dlqArn":"arn:aws:sns:us-east-1:123456789012:mytopic, arn:aws:sss:us-east-1:123456789012:myq"}'
rule_valid_parameter = '{"dlqArn":"arn:aws:sns:us-east-1:123456789012:mytopic, arn:aws:sqs:us-east-1:123456789012:myq"}'
rule_parameter_mismatch = '{"dlqArn":"arn:aws:sns:us-east-1:123456789012:mytopic2, arn:aws:sqs:us-east-1:123456789012:myq2"}'
valid_dlqarn = {
"functionName": "test_function",
"functionArn": "arn:aws:lambda:us-west-2:123456789012:function:test_function",
"deadLetterConfig": "arn:aws:sns:us-east-1:123456789012:mytopic"
}
no_dlq_configured = {
"functionName": "test_function",
"functionArn": "arn:aws:lambda:us-west-2:123456789012:function:test_function"
}
def test_scenario_1_invalid_parameter_value(self):
invoking_event = generate_invoking_event(self.valid_dlqarn)
response = RULE.lambda_handler(
build_lambda_configurationchange_event(invoking_event, rule_parameters=self.rule_invalid_parameter), {})
assert_customer_error_response(self, response, 'InvalidParameterValueException',
'Invalid value for the parameter "dlqArn", Expected Comma-separated list of '
'valid SQS or SNS ARNs.')
def test_scenario_3_empty_parameter_value(self):
invoking_event = generate_invoking_event(self.valid_dlqarn)
response = RULE.lambda_handler(
build_lambda_configurationchange_event(invoking_event, {}), {})
assert_successful_evaluation(self, response, [build_expected_response('COMPLIANT', '123456789012')])
def test_scenario_2_no_dlq_configured(self):
invoking_event = generate_invoking_event(self.no_dlq_configured)
response = RULE.lambda_handler(
build_lambda_configurationchange_event(invoking_event, rule_parameters=self.rule_valid_parameter), {})
assert_successful_evaluation(self, response, [build_expected_response('NON_COMPLIANT', '123456789012', annotation='This Lambda function is not configured for DLQ.')])
def test_scenario_4_no_dlq_match(self):
invoking_event = generate_invoking_event(self.valid_dlqarn)
response = RULE.lambda_handler(
build_lambda_configurationchange_event(invoking_event, rule_parameters=self.rule_parameter_mismatch), {})
assert_successful_evaluation(self, response, [build_expected_response('NON_COMPLIANT', '123456789012', annotation='This Lambda Function is not associated with the DLQ specified in the dlqArn input parameter.')])
def test_scenario_5_dlq_match(self):
invoking_event = generate_invoking_event(self.valid_dlqarn)
response = RULE.lambda_handler(
build_lambda_configurationchange_event(invoking_event, rule_parameters=self.rule_valid_parameter), {})
assert_successful_evaluation(self, response, [build_expected_response('COMPLIANT', '123456789012')])
####################
# Helper Functions #
####################
def generate_invoking_event(test_configuration):
invoking_event = '{"configurationItem":{"configuration":' \
+ json.dumps(test_configuration) \
+ ',"configurationItemCaptureTime":"2019-04-18T08:17:52.315Z","configurationItemStatus":"ResourceDiscovered","resourceType":"AWS::Lambda::Function","resourceId":"123456789012"},"messageType":"ConfigurationItemChangeNotification"}'
return invoking_event
def build_lambda_configurationchange_event(invoking_event, rule_parameters=None):
event_to_return = {
'configRuleName': 'myrule',
'executionRoleArn': 'roleArn',
'eventLeftScope': False,
'invokingEvent': invoking_event,
'accountId': '123456789012',
'configRuleArn': 'arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan',
'resultToken': 'token'
}
if rule_parameters:
event_to_return['ruleParameters'] = rule_parameters
return event_to_return
def build_lambda_scheduled_event(rule_parameters=None):
invoking_event = '{"messageType":"ScheduledNotification","notificationCreationTime":"2017-12-23T22:11:18.158Z"}'
event_to_return = {
'configRuleName': 'myrule',
'executionRoleArn': 'roleArn',
'eventLeftScope': False,
'invokingEvent': invoking_event,
'accountId': '123456789012',
'configRuleArn': 'arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan',
'resultToken': 'token'
}
if rule_parameters:
event_to_return['ruleParameters'] = rule_parameters
return event_to_return
def build_expected_response(compliance_type, compliance_resource_id, compliance_resource_type=DEFAULT_RESOURCE_TYPE,
annotation=None):
if not annotation:
return {
'ComplianceType': compliance_type,
'ComplianceResourceId': compliance_resource_id,
'ComplianceResourceType': compliance_resource_type
}
return {
'ComplianceType': compliance_type,
'ComplianceResourceId': compliance_resource_id,
'ComplianceResourceType': compliance_resource_type,
'Annotation': annotation
}
def assert_successful_evaluation(test_class, response, resp_expected, evaluations_count=1):
if isinstance(response, dict):
test_class.assertEquals(resp_expected['ComplianceResourceType'], response['ComplianceResourceType'])
test_class.assertEquals(resp_expected['ComplianceResourceId'], response['ComplianceResourceId'])
test_class.assertEquals(resp_expected['ComplianceType'], response['ComplianceType'])
test_class.assertTrue(response['OrderingTimestamp'])
if 'Annotation' in resp_expected or 'Annotation' in response:
test_class.assertEquals(resp_expected['Annotation'], response['Annotation'])
elif isinstance(response, list):
test_class.assertEquals(evaluations_count, len(response))
for i, response_expected in enumerate(resp_expected):
test_class.assertEquals(response_expected['ComplianceResourceType'], response[i]['ComplianceResourceType'])
test_class.assertEquals(response_expected['ComplianceResourceId'], response[i]['ComplianceResourceId'])
test_class.assertEquals(response_expected['ComplianceType'], response[i]['ComplianceType'])
test_class.assertTrue(response[i]['OrderingTimestamp'])
if 'Annotation' in response_expected or 'Annotation' in response[i]:
test_class.assertEquals(response_expected['Annotation'], response[i]['Annotation'])
def assert_customer_error_response(test_class, response, customer_error_code=None, customer_error_message=None):
if customer_error_code:
test_class.assertEqual(customer_error_code, response['customerErrorCode'])
if customer_error_message:
test_class.assertEqual(customer_error_message, response['customerErrorMessage'])
test_class.assertTrue(response['customerErrorCode'])
test_class.assertTrue(response['customerErrorMessage'])
if "internalErrorMessage" in response:
test_class.assertTrue(response['internalErrorMessage'])
if "internalErrorDetails" in response:
test_class.assertTrue(response['internalErrorDetails'])
def sts_mock():
assume_role_response = {
"Credentials": {
"AccessKeyId": "string",
"SecretAccessKey": "string",
"SessionToken": "string"}}
STS_CLIENT_MOCK.reset_mock(return_value=True)
STS_CLIENT_MOCK.assume_role = MagicMock(return_value=assume_role_response)
##################
# Common Testing #
##################
class TestStsErrors(unittest.TestCase):
def test_sts_unknown_error(self):
RULE.ASSUME_ROLE_MODE = True
STS_CLIENT_MOCK.assume_role = MagicMock(side_effect=botocore.exceptions.ClientError(
{'Error': {'Code': 'unknown-code', 'Message': 'unknown-message'}}, 'operation'))
response = RULE.lambda_handler(build_lambda_configurationchange_event('{}'), {})
assert_customer_error_response(
self, response, 'InternalError', 'InternalError')
def test_sts_access_denied(self):
RULE.ASSUME_ROLE_MODE = True
STS_CLIENT_MOCK.assume_role = MagicMock(side_effect=botocore.exceptions.ClientError(
{'Error': {'Code': 'AccessDenied', 'Message': 'access-denied'}}, 'operation'))
response = RULE.lambda_handler(build_lambda_configurationchange_event('{}'), {})
assert_customer_error_response(
self, response, 'AccessDenied', 'AWS Config does not have permission to assume the IAM role.')
|
cc0-1.0
|
e3a4508d0e30495d469746e4b96f4db2
| 47.155
| 251
| 0.679265
| 3.932626
| false
| true
| false
| false
|
mcedit/mcedit
|
filters/demo/filterdemo.py
|
1
|
5188
|
# the inputs list tells MCEdit what kind of options to present to the user.
# each item is a (name, value) pair. name is a text string acting
# both as a text label for the input on-screen and a key for the 'options'
# parameter to perform(). value and its type indicate allowable and
# default values for the option:
# True or False: creates a checkbox with the given value as default
# int or float value: creates a value input with the given value as default
# int values create fields that only accept integers.
# tuple of numbers: a tuple of ints or floats creates a value input with minimum and
# maximum values. a 2-tuple specifies (min, max) with min as default.
# a 3-tuple specifies (default, min, max)
# tuple of strings: a tuple of strings creates a popup menu whose entries are
# labeled with the given strings. the first item in the tuple is selected
# by default. returns one of the strings in the tuple.
# "blocktype" as a string: creates a button the user can click to choose
# a block type in a list. returns a Block object. the object has 'ID'
# and 'blockData' attributes.
# the inputs tuple below creates an integer input with range (-128, 128) and default 4,
# a blocktype picker, a floating-point input with no limits and default 15.0,
# a checkbox initially checked, and a menu of choices
inputs = (
("Depth", (4, -128, 128)),
("Pick a block:", "blocktype"),
("Fractal complexity", 15.0),
("Enable thrusters", True),
("Access method", ("Use blockAt", "Use temp schematic", "Use chunk slices")),
)
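# (illustrative, not part of the original demo: given the inputs above, the
# options dict passed to perform() would look roughly like
# {"Depth": 4, "Pick a block:": <Block>, "Fractal complexity": 15.0,
#  "Enable thrusters": True, "Access method": "Use blockAt"},
# with each value reflecting whatever the user picked in the dialog)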
# perform() is the main entry point of a filter. Its parameters are
# a MCLevel instance, a BoundingBox, and an options dictionary.
# The options dictionary will have keys corresponding to the keys specified above,
# and values reflecting the user's input.
# you get undo for free: everything within 'box' is copied to a temporary buffer
# before perform is called, and then copied back when the user asks to undo
def perform(level, box, options):
blockType = options["Pick a block:"].ID
complexity = options["Fractal complexity"]
if options["Enable thrusters"]:
# Errors will alert the user and print a traceback to the console.
raise NotImplementedError("Thrusters not attached!")
method = options["Access method"]
# There are a few general ways of accessing a level's blocks
# The first is using level.blockAt and level.setBlockAt
# These are slower than the other two methods, but easier to start using
if method == "Use blockAt":
for x in xrange(box.minx, box.maxx):
for z in xrange(box.minz, box.maxz):
for y in xrange(box.miny, box.maxy): # nested loops can be slow
# replaces gold with TNT. straightforward.
if level.blockAt(x, y, z) == 14:
level.setBlockAt(x, y, z, 46)
# The second is to extract the segment of interest into a contiguous array
# using level.extractSchematic. this simplifies using numpy but at the cost
# of the temporary buffer and the risk of a memory error on 32-bit systems.
if method == "Use temp schematic":
temp = level.extractSchematic(box)
# remove any entities in the temp. this is an ugly move
# because copyBlocksFrom actually copies blocks, entities, everything
temp.removeEntitiesInBox(temp.bounds)
temp.removeTileEntitiesInBox(temp.bounds)
# replaces gold with TNT.
# the expression in [] creates a temporary the same size, using more memory
temp.Blocks[temp.Blocks == 14] = 46
level.copyBlocksFrom(temp, temp.bounds, box.origin)
# The third method iterates over each subslice of every chunk in the area
# using level.getChunkSlices. this method is a bit arcane, but lets you
# visit the affected area chunk by chunk without using too much memory.
if method == "Use chunk slices":
for (chunk, slices, point) in level.getChunkSlices(box):
# chunk is an AnvilChunk object with attributes:
# Blocks, Data, Entities, and TileEntities
# Blocks and Data can be indexed using slices:
blocks = chunk.Blocks[slices]
# blocks now contains a "view" on the part of the chunk's blocks
# that lie in the selection. This "view" is a numpy object that
# accesses only a subsection of the original array, without copying
# once again, gold into TNT
blocks[blocks == 14] = 46
# notify the world that the chunk changed
# this gives finer control over which chunks are dirtied
# you can call chunk.chunkChanged(False) if you want to dirty it
# but not run the lighting calc later.
chunk.chunkChanged()
# You can also access the level any way you want
# Beware though, you only get to undo the area within the specified box
pos = level.getPlayerPosition()
cpos = pos[0] >> 4, pos[2] >> 4
chunk = level.getChunk(*cpos)
chunk.Blocks[::4, ::4, :64] = 46 # replace every 4x4th column of land with TNT
|
isc
|
4b8bf51a644eccee5042559385164314
| 44.911504
| 87
| 0.673092
| 4.078616
| false
| false
| false
| false
|
mcedit/mcedit
|
editortools/operation.py
|
1
|
4107
|
import atexit
import os
import shutil
import tempfile
import albow
from pymclevel import BoundingBox
import numpy
from albow.root import Cancel
import pymclevel
from mceutils import showProgress
from pymclevel.mclevelbase import exhaust
undo_folder = os.path.join(tempfile.gettempdir(), "mcedit_undo", str(os.getpid()))
def mkundotemp():
if not os.path.exists(undo_folder):
os.makedirs(undo_folder)
return tempfile.mkdtemp("mceditundo", dir=undo_folder)
atexit.register(shutil.rmtree, undo_folder, True)
class Operation(object):
changedLevel = True
undoLevel = None
def __init__(self, editor, level):
self.editor = editor
self.level = level
def extractUndo(self, level, box):
if isinstance(level, pymclevel.MCInfdevOldLevel):
return self.extractUndoChunks(level, box.chunkPositions, box.chunkCount)
else:
return self.extractUndoSchematic(level, box)
def extractUndoChunks(self, level, chunks, chunkCount = None):
if not isinstance(level, pymclevel.MCInfdevOldLevel):
chunks = numpy.array(list(chunks))
mincx, mincz = numpy.min(chunks, 0)
maxcx, maxcz = numpy.max(chunks, 0)
box = BoundingBox((mincx << 4, 0, mincz << 4), (maxcx << 4, level.Height, maxcz << 4))
return self.extractUndoSchematic(level, box)
undoLevel = pymclevel.MCInfdevOldLevel(mkundotemp(), create=True)
if not chunkCount:
try:
chunkCount = len(chunks)
except TypeError:
chunkCount = -1
def _extractUndo():
yield 0, 0, "Recording undo..."
for i, (cx, cz) in enumerate(chunks):
undoLevel.copyChunkFrom(level, cx, cz)
yield i, chunkCount, "Copying chunk %s..." % ((cx, cz),)
undoLevel.saveInPlace()
if chunkCount > 25 or chunkCount < 1:
if "Canceled" == showProgress("Recording undo...", _extractUndo(), cancel=True):
if albow.ask("Continue with undo disabled?", ["Continue", "Cancel"]) == "Cancel":
raise Cancel
else:
return None
else:
exhaust(_extractUndo())
return undoLevel
def extractUndoSchematic(self, level, box):
if box.volume > 131072:
sch = showProgress("Recording undo...", level.extractZipSchematicIter(box), cancel=True)
else:
sch = level.extractZipSchematic(box)
if sch == "Cancel":
raise Cancel
if sch:
sch.sourcePoint = box.origin
return sch
# represents a single undoable operation
def perform(self, recordUndo=True):
" Perform the operation. Record undo information if recordUndo"
def undo(self):
""" Undo the operation. Ought to leave the Operation in a state where it can be performed again.
Default implementation copies all chunks in undoLevel back into level. Non-chunk-based operations
should override this."""
if self.undoLevel:
def _undo():
yield 0, 0, "Undoing..."
if hasattr(self.level, 'copyChunkFrom'):
for i, (cx, cz) in enumerate(self.undoLevel.allChunks):
self.level.copyChunkFrom(self.undoLevel, cx, cz)
yield i, self.undoLevel.chunkCount, "Copying chunk %s..." % ((cx, cz),)
else:
for i in self.level.copyBlocksFromIter(self.undoLevel, self.undoLevel.bounds, self.undoLevel.sourcePoint, biomes=True):
yield i, self.undoLevel.chunkCount, "Copying..."
if self.undoLevel.chunkCount > 25:
showProgress("Undoing...", _undo())
else:
exhaust(_undo())
self.editor.invalidateChunks(self.undoLevel.allChunks)
def dirtyBox(self):
""" The region modified by the operation.
Return None to indicate no blocks were changed.
"""
return None
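# Illustrative sketch (not part of the original module): a minimal concrete
# Operation following the contract documented above. The class name
# FillOperation and the fillBlockID parameter are hypothetical; only Operation,
# extractUndo and dirtyBox come from this file.
class FillOperation(Operation):
    def __init__(self, editor, level, box, fillBlockID):
        super(FillOperation, self).__init__(editor, level)
        self.box = box
        self.fillBlockID = fillBlockID
    def perform(self, recordUndo=True):
        if recordUndo:
            # capture the affected region so the default undo() can restore it
            self.undoLevel = self.extractUndo(self.level, self.box)
        for x in xrange(self.box.minx, self.box.maxx):
            for z in xrange(self.box.minz, self.box.maxz):
                for y in xrange(self.box.miny, self.box.maxy):
                    self.level.setBlockAt(x, y, z, self.fillBlockID)
    def dirtyBox(self):
        # report the modified region so the editor knows what to redraw
        return self.box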
|
isc
|
8f69e77019208e7278ea46718e782391
| 34.102564
| 139
| 0.597029
| 4.119358
| false
| false
| false
| false
|
mcedit/mcedit
|
filters/floodwater.py
|
1
|
2494
|
from numpy import *
from pymclevel import alphaMaterials, faceDirections, FaceYIncreasing
from collections import deque
import datetime
displayName = "Classic Water Flood"
inputs = (
("Makes water in the region flood outwards and downwards, becoming full source blocks in the process. This is similar to Minecraft Classic water.", "label"),
("Flood Water", True),
("Flood Lava", False),
)
def perform(level, box, options):
def floodFluid(waterIDs, waterID):
waterTable = zeros(256, dtype='bool')
waterTable[waterIDs] = True
coords = []
for chunk, slices, point in level.getChunkSlices(box):
water = waterTable[chunk.Blocks[slices]]
chunk.Data[slices][water] = 0 # source block
x, z, y = water.nonzero()
x = x + (point[0] + box.minx)
z = z + (point[2] + box.minz)
y = y + (point[1] + box.miny)
coords.append(transpose((x, y, z)))
print "Stacking coords..."
coords = vstack(tuple(coords))
def processCoords(coords):
newcoords = deque()
for (x, y, z) in coords:
for _dir, offsets in faceDirections:
if _dir == FaceYIncreasing:
continue
dx, dy, dz = offsets
p = (x + dx, y + dy, z + dz)
if p not in box:
continue
nx, ny, nz = p
if level.blockAt(nx, ny, nz) == 0:
level.setBlockAt(nx, ny, nz, waterID)
newcoords.append(p)
return newcoords
def spread(coords):
while len(coords):
start = datetime.datetime.now()
num = len(coords)
print "Did {0} coords in ".format(num),
coords = processCoords(coords)
d = datetime.datetime.now() - start
print d
yield "Did {0} coords in {1}".format(num, d)
level.showProgress("Spreading water...", spread(coords), cancel=True)
if options["Flood Water"]:
waterIDs = [alphaMaterials.WaterActive.ID, alphaMaterials.Water.ID]
waterID = alphaMaterials.Water.ID
floodFluid(waterIDs, waterID)
if options["Flood Lava"]:
lavaIDs = [alphaMaterials.LavaActive.ID, alphaMaterials.Lava.ID]
lavaID = alphaMaterials.Lava.ID
floodFluid(lavaIDs, lavaID)
|
isc
|
b95c1a52e81363ce977d3cfd7bab858d
| 32.702703
| 159
| 0.543304
| 3.872671
| false
| false
| false
| false
|
mcedit/mcedit
|
glutils.py
|
1
|
7859
|
"""Copyright (c) 2010-2012 David Rio Vierra
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE."""
"""
glutils.py
Pythonesque wrappers around certain OpenGL functions.
"""
from OpenGL import GL
from OpenGL.GL.ARB import window_pos
import numpy
import functools
from contextlib import contextmanager
from albow import Label
from albow.openglwidgets import GLOrtho
import config
import weakref
from OpenGL.GL import framebufferobjects as FBO
import sys
class gl(object):
@classmethod
def ResetGL(cls):
DisplayList.invalidateAllLists()
@classmethod
@contextmanager
def glPushMatrix(cls, matrixmode):
try:
GL.glMatrixMode(matrixmode)
GL.glPushMatrix()
yield
finally:
GL.glMatrixMode(matrixmode)
GL.glPopMatrix()
@classmethod
@contextmanager
def glPushAttrib(cls, attribs):
try:
GL.glPushAttrib(attribs)
yield
finally:
GL.glPopAttrib()
@classmethod
@contextmanager
def glBegin(cls, type):
try:
GL.glBegin(type)
yield
finally:
GL.glEnd()
@classmethod
@contextmanager
def glEnable(cls, *enables):
try:
GL.glPushAttrib(GL.GL_ENABLE_BIT)
for e in enables:
GL.glEnable(e)
yield
finally:
GL.glPopAttrib()
@classmethod
@contextmanager
def glEnableClientState(cls, *enables):
try:
GL.glPushClientAttrib(GL.GL_CLIENT_ALL_ATTRIB_BITS)
for e in enables:
GL.glEnableClientState(e)
yield
finally:
GL.glPopClientAttrib()
listCount = 0
@classmethod
def glGenLists(cls, n):
cls.listCount += n
return GL.glGenLists(n)
@classmethod
def glDeleteLists(cls, base, n):
cls.listCount -= n
return GL.glDeleteLists(base, n)
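# Illustrative usage sketch (not part of the original module): the context
# managers above pair each push/enable with its matching pop/disable even when
# the wrapped drawing code raises. _example_draw_marker is a hypothetical name.
def _example_draw_marker(pos):
    with gl.glPushMatrix(GL.GL_MODELVIEW):
        GL.glTranslatef(pos[0], pos[1], pos[2])
        with gl.glEnable(GL.GL_DEPTH_TEST):
            with gl.glBegin(GL.GL_LINES):
                GL.glVertex3f(0.0, 0.0, 0.0)
                GL.glVertex3f(0.0, 1.0, 0.0)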
class DisplayList(object):
allLists = []
def __init__(self, drawFunc=None):
self.drawFunc = drawFunc
self._list = None
def _delete(r):
DisplayList.allLists.remove(r)
self.allLists.append(weakref.ref(self, _delete))
def __del__(self):
self.invalidate()
@classmethod
def invalidateAllLists(self):
allLists = []
for listref in self.allLists:
list = listref()
if list:
list.invalidate()
allLists.append(listref)
self.allLists = allLists
def invalidate(self):
if self._list:
gl.glDeleteLists(self._list[0], 1)
self._list = None
def makeList(self, drawFunc):
if self._list:
return
drawFunc = (drawFunc or self.drawFunc)
if drawFunc is None:
return
l = gl.glGenLists(1)
GL.glNewList(l, GL.GL_COMPILE)
drawFunc()
#try:
GL.glEndList()
#except GL.GLError:
# print "Error while compiling display list. Retrying display list code to pinpoint error"
# self.drawFunc()
self._list = numpy.array([l], 'uintc')
def getList(self, drawFunc=None):
self.makeList(drawFunc)
return self._list
if "-debuglists" in sys.argv:
def call(self, drawFunc=None):
drawFunc = (drawFunc or self.drawFunc)
if drawFunc is None:
return
drawFunc()
else:
def call(self, drawFunc=None):
self.makeList(drawFunc)
GL.glCallLists(self._list)
class Texture(object):
allTextures = []
defaultFilter = GL.GL_NEAREST
def __init__(self, textureFunc=None, minFilter=None, magFilter=None):
minFilter = minFilter or self.defaultFilter
magFilter = magFilter or self.defaultFilter
if textureFunc is None:
textureFunc = lambda: None
self.textureFunc = textureFunc
self._texID = GL.glGenTextures(1)
def _delete(r):
Texture.allTextures.remove(r)
self.allTextures.append(weakref.ref(self, _delete))
self.bind()
GL.glTexParameter(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, minFilter)
GL.glTexParameter(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, magFilter)
self.textureFunc()
def __del__(self):
self.delete()
def delete(self):
if self._texID is not None:
GL.glDeleteTextures(self._texID)
def bind(self):
GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID)
def invalidate(self):
self.dirty = True
class FramebufferTexture(Texture):
def __init__(self, width, height, drawFunc):
tex = GL.glGenTextures(1)
GL.glBindTexture(GL.GL_TEXTURE_2D, tex)
GL.glTexParameter(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
GL.glTexParameter(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGBA8, width, height, 0, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, None)
self.enabled = False
self._texID = tex
if bool(FBO.glGenFramebuffers) and "Intel" not in GL.glGetString(GL.GL_VENDOR):
buf = FBO.glGenFramebuffers(1)
depthbuffer = FBO.glGenRenderbuffers(1)
FBO.glBindFramebuffer(FBO.GL_FRAMEBUFFER, buf)
FBO.glBindRenderbuffer(FBO.GL_RENDERBUFFER, depthbuffer)
FBO.glRenderbufferStorage(FBO.GL_RENDERBUFFER, GL.GL_DEPTH_COMPONENT, width, height)
FBO.glFramebufferRenderbuffer(FBO.GL_FRAMEBUFFER, FBO.GL_DEPTH_ATTACHMENT, FBO.GL_RENDERBUFFER, depthbuffer)
FBO.glFramebufferTexture2D(FBO.GL_FRAMEBUFFER, FBO.GL_COLOR_ATTACHMENT0, GL.GL_TEXTURE_2D, tex, 0)
status = FBO.glCheckFramebufferStatus(FBO.GL_FRAMEBUFFER)
if status != FBO.GL_FRAMEBUFFER_COMPLETE:
print "glCheckFramebufferStatus", status
self.enabled = False
return
FBO.glBindFramebuffer(FBO.GL_FRAMEBUFFER, buf)
with gl.glPushAttrib(GL.GL_VIEWPORT_BIT):
GL.glViewport(0, 0, width, height)
drawFunc()
FBO.glBindFramebuffer(FBO.GL_FRAMEBUFFER, 0)
FBO.glDeleteFramebuffers(1, [buf])
FBO.glDeleteRenderbuffers(1, [depthbuffer])
self.enabled = True
else:
GL.glReadBuffer(GL.GL_BACK)
GL.glPushAttrib(GL.GL_VIEWPORT_BIT | GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT | GL.GL_STENCIL_TEST | GL.GL_STENCIL_BUFFER_BIT)
GL.glDisable(GL.GL_STENCIL_TEST)
GL.glViewport(0, 0, width, height)
GL.glScissor(0, 0, width, height)
with gl.glEnable(GL.GL_SCISSOR_TEST):
drawFunc()
GL.glBindTexture(GL.GL_TEXTURE_2D, tex)
GL.glReadBuffer(GL.GL_BACK)
GL.glCopyTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, 0, 0, width, height)
GL.glPopAttrib()
def debugDrawPoint(point):
GL.glColor(1.0, 1.0, 0.0, 1.0)
GL.glPointSize(9.0)
with gl.glBegin(GL.GL_POINTS):
GL.glVertex3f(*point)
|
isc
|
88fd09a9441caa39bce11368729c626d
| 28.324627
| 145
| 0.616109
| 3.631701
| false
| false
| false
| false
|
mcedit/mcedit
|
filters/AddPotionEffect.py
|
1
|
1637
|
# Feel free to modify and use this filter however you wish. If you do,
# please give credit to SethBling.
# http://youtube.com/SethBling
from pymclevel import TAG_List
from pymclevel import TAG_Byte
from pymclevel import TAG_Int
from pymclevel import TAG_Compound
displayName = "Add Potion Effect to Mobs"
Effects = {
"Strength": 5,
"Jump Boost": 8,
"Regeneration": 10,
"Fire Resistance": 12,
"Water Breathing": 13,
"Resistance": 11,
"Weakness": 18,
"Poison": 19,
"Speed (no mob effect)": 1,
"Slowness (no mob effect)": 2,
"Haste (no mob effect)": 3,
"Mining Fatigue (no mob effectg)": 4,
"Nausea (no mob effect)": 9,
"Blindness (no mob effect)": 15,
"Hunger (no mob effect)": 17,
"Invisibility (no effect)": 14,
"Night Vision (no effect)": 16,
}
EffectKeys = tuple(Effects.keys())
inputs = (
("Effect", EffectKeys),
("Level", 1),
("Duration (Seconds)", 60),
)
def perform(level, box, options):
effect = Effects[options["Effect"]]
amp = options["Level"]
duration = options["Duration (Seconds)"] * 20
for (chunk, slices, point) in level.getChunkSlices(box):
for e in chunk.Entities:
x = e["Pos"][0].value
y = e["Pos"][1].value
z = e["Pos"][2].value
if box.minx <= x < box.maxx and box.miny <= y < box.maxy and box.minz <= z < box.maxz:
if "Health" in e:
if "ActiveEffects" not in e:
e["ActiveEffects"] = TAG_List()
ef = TAG_Compound()
ef["Amplifier"] = TAG_Byte(amp)
ef["Id"] = TAG_Byte(effect)
ef["Duration"] = TAG_Int(duration)
e["ActiveEffects"].append(ef)
chunk.dirty = True
|
isc
|
64e2e278a4946f0af9f2dc5da81f73a5
| 24.578125
| 107
| 0.638363
| 2.648867
| false
| false
| false
| false
|
mcedit/mcedit
|
editortools/filter.py
|
1
|
14414
|
"""Copyright (c) 2010-2012 David Rio Vierra
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE."""
import collections
import os
import traceback
from albow import FloatField, IntField, AttrRef, Row, Label, Widget, TabPanel, CheckBox, Column, Button, TextFieldWrapped
from editortools.blockview import BlockButton
from editortools.editortool import EditorTool
from glbackground import Panel
from mceutils import ChoiceButton, alertException, setWindowCaption, showProgress
import mcplatform
from operation import Operation
from albow.dialogs import wrapped_label, alert
import pymclevel
from pymclevel import BoundingBox
def alertFilterException(func):
def _func(*args, **kw):
try:
func(*args, **kw)
except Exception, e:
print traceback.format_exc()
alert(u"Exception during filter operation. See console for details.\n\n{0}".format(e))
return _func
def addNumField(page, optionName, val, min=None, max=None):
if isinstance(val, float):
ftype = FloatField
else:
ftype = IntField
if min == max:
min = None
max = None
field = ftype(value=val, width=100, min=min, max=max)
page.optionDict[optionName] = AttrRef(field, 'value')
row = Row([Label(optionName), field])
return row
class FilterModuleOptions(Widget):
is_gl_container = True
def __init__(self, tool, module, *args, **kw):
Widget.__init__(self, *args, **kw)
self.tool = tool
pages = TabPanel()
pages.is_gl_container = True
self.pages = pages
self.optionDict = {}
pageTabContents = []
print "Creating options for ", module
if hasattr(module, "inputs"):
if isinstance(module.inputs, list):
for tabData in module.inputs:
title, page, pageRect = self.makeTabPage(self.tool, tabData)
pages.add_page(title, page)
pages.set_rect(pageRect.union(pages._rect))
elif isinstance(module.inputs, tuple):
title, page, pageRect = self.makeTabPage(self.tool, module.inputs)
pages.add_page(title, page)
pages.set_rect(pageRect)
else:
self.size = (0, 0)
pages.shrink_wrap()
self.add(pages)
self.shrink_wrap()
if len(pages.pages):
if pages.current_page is not None:
pages.show_page(pages.current_page)
else:
pages.show_page(pages.pages[0])
for eachPage in pages.pages:
self.optionDict = dict(self.optionDict.items() + eachPage.optionDict.items())
def makeTabPage(self, tool, inputs):
page = Widget()
page.is_gl_container = True
rows = []
cols = []
height = 0
max_height = 550
page.optionDict = {}
page.tool = tool
title = "Tab"
for optionName, optionType in inputs:
if isinstance(optionType, tuple):
if isinstance(optionType[0], (int, long, float)):
if len(optionType) > 2:
val, min, max = optionType
elif len(optionType) == 2:
min, max = optionType
val = min
rows.append(addNumField(page, optionName, val, min, max))
if isinstance(optionType[0], (str, unicode)):
isChoiceButton = False
if optionType[0] == "string":
kwds = []
wid = None
lin = None
val = None
for keyword in optionType:
if isinstance(keyword, (str, unicode)) and keyword != "string":
kwds.append(keyword)
for keyword in kwds:
splitWord = keyword.split('=')
if len(splitWord) > 1:
v = None
key = None
try:
v = int(splitWord[1])
except:
pass
key = splitWord[0]
if v is not None:
if key == "lines":
lin = v
elif key == "width":
wid = v
else:
if key == "value":
val = splitWord[1]
if lin is None:
lin = 1
if val is None:
val = "Input String Here"
if wid is None:
wid = 200
field = TextFieldWrapped(value=val, width=wid,lines=lin)
page.optionDict[optionName] = AttrRef(field, 'value')
row = Row((Label(optionName), field))
rows.append(row)
else:
isChoiceButton = True
if isChoiceButton:
choiceButton = ChoiceButton(map(str, optionType))
page.optionDict[optionName] = AttrRef(choiceButton, 'selectedChoice')
rows.append(Row((Label(optionName), choiceButton)))
elif isinstance(optionType, bool):
cbox = CheckBox(value=optionType)
page.optionDict[optionName] = AttrRef(cbox, 'value')
row = Row((Label(optionName), cbox))
rows.append(row)
elif isinstance(optionType, (int, float)):
rows.append(addNumField(self, optionName, optionType))
elif optionType == "blocktype" or isinstance(optionType, pymclevel.materials.Block):
blockButton = BlockButton(tool.editor.level.materials)
if isinstance(optionType, pymclevel.materials.Block):
blockButton.blockInfo = optionType
row = Column((Label(optionName), blockButton))
page.optionDict[optionName] = AttrRef(blockButton, 'blockInfo')
rows.append(row)
elif optionType == "label":
rows.append(wrapped_label(optionName, 50))
elif optionType == "string":
field = TextFieldWrapped(value="Input String Here", width=200, lines=1)
page.optionDict[optionName] = AttrRef(field, 'value')
row = Row((Label(optionName), field))
rows.append(row)
elif optionType == "title":
title = optionName
else:
raise ValueError(("Unknown option type", optionType))
height = sum(r.height for r in rows)
if height > max_height:
h = 0
for i, r in enumerate(rows):
h += r.height
if h > height / 2:
break
cols.append(Column(rows[:i]))
rows = rows[i:]
#cols.append(Column(rows))
if len(rows):
cols.append(Column(rows))
if len(cols):
page.add(Row(cols))
page.shrink_wrap()
return (title, page, page._rect)
@property
def options(self):
return dict((k, v.get()) for k, v in self.optionDict.iteritems())
@options.setter
def options(self, val):
for k in val:
if k in self.optionDict:
self.optionDict[k].set(val[k])
class FilterToolPanel(Panel):
def __init__(self, tool):
Panel.__init__(self)
self.savedOptions = {}
self.tool = tool
self.selectedFilterName = None
if len(self.tool.filterModules):
self.reload()
def reload(self):
for i in list(self.subwidgets):
self.remove(i)
tool = self.tool
if len(tool.filterModules) == 0:
self.add(Label("No filter modules found!"))
self.shrink_wrap()
return
if self.selectedFilterName is None or self.selectedFilterName not in tool.filterNames:
self.selectedFilterName = tool.filterNames[0]
self.filterOptionsPanel = None
while self.filterOptionsPanel is None:
module = self.tool.filterModules[self.selectedFilterName]
try:
self.filterOptionsPanel = FilterModuleOptions(self.tool, module)
except Exception, e:
alert("Error creating filter inputs for {0}: {1}".format(module, e))
traceback.print_exc()
self.tool.filterModules.pop(self.selectedFilterName)
self.selectedFilterName = tool.filterNames[0]
if len(tool.filterNames) == 0:
raise ValueError("No filters loaded!")
self.filterSelect = ChoiceButton(tool.filterNames, choose=self.filterChanged)
self.filterSelect.selectedChoice = self.selectedFilterName
self.confirmButton = Button("Filter", action=self.tool.confirm)
filterLabel = Label("Filter:", fg_color=(177, 177, 255, 255))
filterLabel.mouse_down = lambda x: mcplatform.platform_open(mcplatform.filtersDir)
filterLabel.tooltipText = "Click to open filters folder"
filterSelectRow = Row((filterLabel, self.filterSelect))
self.add(Column((filterSelectRow, self.filterOptionsPanel, self.confirmButton)))
self.shrink_wrap()
if self.parent:
self.centery = self.parent.centery
if self.selectedFilterName in self.savedOptions:
self.filterOptionsPanel.options = self.savedOptions[self.selectedFilterName]
def filterChanged(self):
self.saveOptions()
self.selectedFilterName = self.filterSelect.selectedChoice
self.reload()
filterOptionsPanel = None
def saveOptions(self):
if self.filterOptionsPanel:
self.savedOptions[self.selectedFilterName] = self.filterOptionsPanel.options
class FilterOperation(Operation):
def __init__(self, editor, level, box, filter, options):
super(FilterOperation, self).__init__(editor, level)
self.box = box
self.filter = filter
self.options = options
def perform(self, recordUndo=True):
if recordUndo:
self.undoLevel = self.extractUndo(self.level, self.box)
self.filter.perform(self.level, BoundingBox(self.box), self.options)
pass
def dirtyBox(self):
return self.box
class FilterTool(EditorTool):
tooltipText = "Filter"
toolIconName = "filter"
def __init__(self, editor):
EditorTool.__init__(self, editor)
self.filterModules = {}
self.panel = FilterToolPanel(self)
@property
def statusText(self):
return "Choose a filter, then click Filter or press ENTER to apply it."
def toolEnabled(self):
        return self.selectionBox() is not None
def toolSelected(self):
self.showPanel()
@alertException
def showPanel(self):
if self.panel.parent:
self.editor.remove(self.panel)
self.reloadFilters()
#self.panel = FilterToolPanel(self)
self.panel.reload()
self.panel.left = self.editor.left
self.panel.centery = self.editor.centery
self.editor.add(self.panel)
def hidePanel(self):
self.panel.saveOptions()
if self.panel.parent:
self.panel.parent.remove(self.panel)
def reloadFilters(self):
filterDir = mcplatform.filtersDir
filterFiles = os.listdir(filterDir)
filterPyfiles = filter(lambda x: x.endswith(".py"), filterFiles)
def tryImport(name):
try:
return __import__(name)
except Exception, e:
print traceback.format_exc()
alert(u"Exception while importing filter module {}. See console for details.\n\n{}".format(name, e))
return object()
filterModules = (tryImport(x[:-3]) for x in filterPyfiles)
filterModules = filter(lambda module: hasattr(module, "perform"), filterModules)
self.filterModules = collections.OrderedDict(sorted((self.moduleDisplayName(x), x) for x in filterModules))
for m in self.filterModules.itervalues():
try:
reload(m)
except Exception, e:
print traceback.format_exc()
alert(u"Exception while reloading filter module {}. Using previously loaded module. See console for details.\n\n{}".format(m.__file__, e))
@property
def filterNames(self):
return [self.moduleDisplayName(module) for module in self.filterModules.itervalues()]
def moduleDisplayName(self, module):
return module.displayName if hasattr(module, 'displayName') else module.__name__.capitalize()
@alertFilterException
def confirm(self):
with setWindowCaption("APPLYING FILTER - "):
filterModule = self.filterModules[self.panel.filterSelect.selectedChoice]
op = FilterOperation(self.editor, self.editor.level, self.selectionBox(), filterModule, self.panel.filterOptionsPanel.options)
self.editor.level.showProgress = showProgress
self.editor.addOperation(op)
self.editor.addUnsavedEdit()
self.editor.invalidateBox(self.selectionBox())
|
isc
|
8c4a94cc4579854e01354607f90ff03e
| 34.156098
| 154
| 0.565423
| 4.500156
| false
| false
| false
| false
|
mcedit/mcedit
|
albow/resource.py
|
1
|
4762
|
# -*- coding: utf-8 -*-
import os
import sys
import pygame
from pygame.locals import RLEACCEL
#default_font_name = "Vera.ttf"
optimize_images = True
run_length_encode = False
def find_resource_dir():
try:
from directories import dataDir
return dataDir
except:
pass
dir = sys.path[0]
while 1:
path = os.path.join(dir, "MCEditData")
if os.path.exists(path):
return path
parent = os.path.dirname(dir)
if parent == dir:
raise SystemError("albow: Unable to find Resources directory")
dir = parent
resource_dir = find_resource_dir()
image_cache = {}
font_cache = {}
sound_cache = {}
text_cache = {}
cursor_cache = {}
def _resource_path(default_prefix, names, prefix=""):
return os.path.join(resource_dir, prefix or default_prefix, *names)
def resource_path(*names, **kwds):
return _resource_path("", names, **kwds)
def resource_exists(*names, **kwds):
return os.path.exists(_resource_path("", names, **kwds))
def _get_image(names, border=0, optimize=optimize_images, noalpha=False,
rle=run_length_encode, prefix="images"):
path = _resource_path(prefix, names)
image = image_cache.get(path)
if not image:
image = pygame.image.load(path)
if noalpha:
image = image.convert(24)
elif optimize:
image = image.convert_alpha()
if rle:
image.set_alpha(255, RLEACCEL)
if border:
w, h = image.get_size()
b = border
d = 2 * border
image = image.subsurface(b, b, w - d, h - d)
image_cache[path] = image
return image
def get_image(*names, **kwds):
return _get_image(names, **kwds)
def get_font(size, *names, **kwds):
path = _resource_path("fonts", names, **kwds)
key = (path, size)
font = font_cache.get(key)
if not font:
try:
font = pygame.font.Font(path, size)
except Exception, e:
try:
font = pygame.font.Font(path.encode(sys.getfilesystemencoding()), size)
except Exception, e:
print "Couldn't get font {0}, using sysfont".format((path, size))
font = pygame.font.SysFont("Courier New", size)
font_cache[key] = font
return font
class DummySound(object):
def fadeout(self, x):
pass
def get_length(self):
return 0.0
def get_num_channels(self):
return 0
def get_volume(self):
return 0.0
def play(self, *args):
pass
def set_volume(self, x):
pass
def stop(self):
pass
dummy_sound = DummySound()
def get_sound(*names, **kwds):
if sound_cache is None:
return dummy_sound
path = _resource_path("sounds", names, **kwds)
sound = sound_cache.get(path)
if not sound:
try:
from pygame.mixer import Sound
except ImportError, e:
no_sound(e)
return dummy_sound
try:
sound = Sound(path)
except pygame.error, e:
missing_sound(e, path)
return dummy_sound
sound_cache[path] = sound
return sound
def no_sound(e):
global sound_cache
print "albow.resource.get_sound: %s" % e
print "albow.resource.get_sound: Sound not available, continuing without it"
sound_cache = None
def missing_sound(e, name):
print "albow.resource.get_sound: %s: %s" % (name, e)
def get_text(*names, **kwds):
path = _resource_path("text", names, **kwds)
text = text_cache.get(path)
if text is None:
text = open(path, "rU").read()
text_cache[path] = text
return text
def load_cursor(path):
image = get_image(path)
width, height = image.get_size()
hot = (0, 0)
data = []
mask = []
rowbytes = (width + 7) // 8
xr = xrange(width)
yr = xrange(height)
for y in yr:
bit = 0x80
db = mb = 0
for x in xr:
r, g, b, a = image.get_at((x, y))
if a >= 128:
mb |= bit
if r + g + b < 383:
db |= bit
if r == 0 and b == 255:
hot = (x, y)
bit >>= 1
if not bit:
data.append(db)
mask.append(mb)
db = mb = 0
bit = 0x80
if bit != 0x80:
data.append(db)
mask.append(mb)
return (8 * rowbytes, height), hot, data, mask
def get_cursor(*names, **kwds):
path = _resource_path("cursors", names, **kwds)
cursor = cursor_cache.get(path)
if cursor is None:
cursor = load_cursor(path)
cursor_cache[path] = cursor
return cursor
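# Illustrative usage (not part of the original module): the tuple returned by
# get_cursor() lines up with the (size, hotspot, xormask, andmask) arguments of
# pygame.mouse.set_cursor(); the cursor image name below is a placeholder.
#
#     size, hot, data, mask = get_cursor("arrow.png")
#     pygame.mouse.set_cursor(size, hot, data, mask)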
|
isc
|
fae12aed5c3b30b027c51c7459d996f8
| 23.546392
| 87
| 0.546829
| 3.54052
| false
| false
| false
| false
|
mcedit/mcedit
|
editortools/chunk.py
|
1
|
19257
|
"""Copyright (c) 2010-2012 David Rio Vierra
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE."""
import traceback
from OpenGL import GL
import numpy
from numpy import newaxis
from albow import Label, ValueDisplay, AttrRef, Button, Column, ask, Row, alert, Widget, Menu
from editortools.editortool import EditorTool
from glbackground import Panel
from glutils import DisplayList, gl
from mceutils import alertException, setWindowCaption, showProgress, ChoiceButton, IntInputRow, CheckBoxLabel
import mcplatform
import pymclevel
from pymclevel.minecraft_server import MCServerChunkGenerator
from albow.dialogs import Dialog
class ChunkToolPanel(Panel):
def __init__(self, tool, *a, **kw):
Panel.__init__(self, *a, **kw)
self.tool = tool
self.anchor = "whl"
chunkToolLabel = Label("Selected Chunks:")
self.chunksLabel = ValueDisplay(ref=AttrRef(self, 'chunkSizeText'), width=100)
self.chunksLabel.align = "c"
self.chunksLabel.tooltipText = "..."
extractButton = Button("Extract")
extractButton.tooltipText = "Extract these chunks to individual chunk files"
extractButton.action = tool.extractChunks
extractButton.highlight_color = (255, 255, 255)
deselectButton = Button("Deselect",
tooltipText=None,
action=tool.editor.deselect,
)
createButton = Button("Create")
createButton.tooltipText = "Create new, empty chunks within the selection."
createButton.action = tool.createChunks
createButton.highlight_color = (0, 255, 0)
destroyButton = Button("Delete")
destroyButton.tooltipText = "Delete the selected chunks from disk. Minecraft will recreate them the next time you are near."
destroyButton.action = tool.destroyChunks
pruneButton = Button("Prune")
pruneButton.tooltipText = "Prune the world, leaving only the selected chunks. Any chunks outside of the selection will be removed, and empty region files will be deleted from disk"
pruneButton.action = tool.pruneChunks
relightButton = Button("Relight")
relightButton.tooltipText = "Recalculate light values across the selected chunks"
relightButton.action = tool.relightChunks
relightButton.highlight_color = (255, 255, 255)
repopButton = Button("Repop")
repopButton.tooltipText = "Mark the selected chunks for repopulation. The next time you play Minecraft, the chunks will have trees, ores, and other features regenerated."
repopButton.action = tool.repopChunks
repopButton.highlight_color = (255, 200, 155)
dontRepopButton = Button("Don't Repop")
dontRepopButton.tooltipText = "Unmark the selected chunks. They will not repopulate the next time you play the game."
dontRepopButton.action = tool.dontRepopChunks
dontRepopButton.highlight_color = (255, 255, 255)
col = Column((chunkToolLabel, self.chunksLabel, deselectButton, createButton, destroyButton, pruneButton, relightButton, extractButton, repopButton, dontRepopButton))
# col.right = self.width - 10;
self.width = col.width
self.height = col.height
#self.width = 120
self.add(col)
@property
def chunkSizeText(self):
return "{0} chunks".format(len(self.tool.selectedChunks()))
def updateText(self):
pass
#self.chunksLabel.text = self.chunksLabelText()
class ChunkTool(EditorTool):
toolIconName = "chunk"
tooltipText = "Chunk Control"
@property
def statusText(self):
return "Click and drag to select chunks. Hold ALT to deselect chunks. Hold SHIFT to select chunks."
def toolEnabled(self):
return isinstance(self.editor.level, pymclevel.ChunkedLevelMixin)
_selectedChunks = None
_displayList = None
def drawToolMarkers(self):
if self._displayList is None:
self._displayList = DisplayList(self._drawToolMarkers)
#print len(self._selectedChunks) if self._selectedChunks else None, "!=", len(self.editor.selectedChunks)
if self._selectedChunks != self.editor.selectedChunks or True: # xxx
self._selectedChunks = set(self.editor.selectedChunks)
self._displayList.invalidate()
self._displayList.call()
def _drawToolMarkers(self):
lines = (
((-1, 0), (0, 0, 0, 1), []),
((1, 0), (1, 0, 1, 1), []),
((0, -1), (0, 0, 1, 0), []),
((0, 1), (0, 1, 1, 1), []),
)
for ch in self._selectedChunks:
cx, cz = ch
for (dx, dz), points, positions in lines:
n = (cx + dx, cz + dz)
if n not in self._selectedChunks:
positions.append([ch])
color = self.editor.selectionTool.selectionColor + (0.3, )
GL.glColor(*color)
with gl.glEnable(GL.GL_BLEND):
import renderer
sizedChunks = renderer.chunkMarkers(self._selectedChunks)
for size, chunks in sizedChunks.iteritems():
if not len(chunks):
continue
chunks = numpy.array(chunks, dtype='float32')
chunkPosition = numpy.zeros(shape=(chunks.shape[0], 4, 3), dtype='float32')
chunkPosition[..., (0, 2)] = numpy.array(((0, 0), (0, 1), (1, 1), (1, 0)), dtype='float32')
chunkPosition[..., (0, 2)] *= size
chunkPosition[..., (0, 2)] += chunks[:, newaxis, :]
chunkPosition *= 16
chunkPosition[..., 1] = self.editor.level.Height
GL.glVertexPointer(3, GL.GL_FLOAT, 0, chunkPosition.ravel())
#chunkPosition *= 8
GL.glDrawArrays(GL.GL_QUADS, 0, len(chunkPosition) * 4)
for d, points, positions in lines:
if 0 == len(positions):
continue
vertexArray = numpy.zeros((len(positions), 4, 3), dtype='float32')
vertexArray[..., [0, 2]] = positions
vertexArray.shape = len(positions), 2, 2, 3
vertexArray[..., 0, 0, 0] += points[0]
vertexArray[..., 0, 0, 2] += points[1]
vertexArray[..., 0, 1, 0] += points[2]
vertexArray[..., 0, 1, 2] += points[3]
vertexArray[..., 1, 0, 0] += points[2]
vertexArray[..., 1, 0, 2] += points[3]
vertexArray[..., 1, 1, 0] += points[0]
vertexArray[..., 1, 1, 2] += points[1]
vertexArray *= 16
vertexArray[..., 1, :, 1] = self.editor.level.Height
GL.glVertexPointer(3, GL.GL_FLOAT, 0, vertexArray)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
GL.glDrawArrays(GL.GL_QUADS, 0, len(positions) * 4)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
with gl.glEnable(GL.GL_BLEND, GL.GL_DEPTH_TEST):
GL.glDepthMask(False)
GL.glDrawArrays(GL.GL_QUADS, 0, len(positions) * 4)
GL.glDepthMask(True)
@property
def worldTooltipText(self):
box = self.editor.selectionTool.selectionBoxInProgress()
if box:
box = box.chunkBox(self.editor.level)
l, w = box.length // 16, box.width // 16
return "%s x %s chunks" % (l, w)
def toolSelected(self):
self.editor.selectionToChunks()
self.panel = ChunkToolPanel(self)
self.panel.centery = self.editor.centery
self.panel.left = 10
self.editor.add(self.panel)
def cancel(self):
self.editor.remove(self.panel)
def selectedChunks(self):
return self.editor.selectedChunks
@alertException
def extractChunks(self):
folder = mcplatform.askSaveFile(mcplatform.docsFolder,
title='Export chunks to...',
defaultName=self.editor.level.displayName + "_chunks",
filetype='Folder\0*.*\0\0',
suffix="",
)
if not folder:
return
# TODO: We need a third dimension, Scotty!
for cx, cz in self.selectedChunks():
if self.editor.level.containsChunk(cx, cz):
self.editor.level.extractChunk(cx, cz, folder)
@alertException
def destroyChunks(self, chunks=None):
if "No" == ask("Really delete these chunks? This cannot be undone.", ("Yes", "No")):
return
if chunks is None:
chunks = self.selectedChunks()
chunks = list(chunks)
def _destroyChunks():
i = 0
chunkCount = len(chunks)
for cx, cz in chunks:
i += 1
yield (i, chunkCount)
if self.editor.level.containsChunk(cx, cz):
try:
self.editor.level.deleteChunk(cx, cz)
except Exception, e:
print "Error during chunk delete: ", e
with setWindowCaption("DELETING - "):
showProgress("Deleting chunks...", _destroyChunks())
self.editor.renderer.invalidateChunkMarkers()
self.editor.renderer.discardChunks(chunks)
#self.editor.addUnsavedEdit()
@alertException
def pruneChunks(self):
if "No" == ask("Save these chunks and remove the rest? This cannot be undone.", ("Yes", "No")):
return
self.editor.saveFile()
def _pruneChunks():
selectedChunks = self.selectedChunks()
for i, cPos in enumerate(list(self.editor.level.allChunks)):
if cPos not in selectedChunks:
try:
self.editor.level.deleteChunk(*cPos)
except Exception, e:
print "Error during chunk delete: ", e
yield i, self.editor.level.chunkCount
with setWindowCaption("PRUNING - "):
showProgress("Pruning chunks...", _pruneChunks())
self.editor.renderer.invalidateChunkMarkers()
self.editor.discardAllChunks()
#self.editor.addUnsavedEdit()
@alertException
def relightChunks(self):
def _relightChunks():
for i in self.editor.level.generateLightsIter(self.selectedChunks()):
yield i
with setWindowCaption("RELIGHTING - "):
showProgress("Lighting {0} chunks...".format(len(self.selectedChunks())),
_relightChunks(), cancel=True)
self.editor.invalidateChunks(self.selectedChunks())
self.editor.addUnsavedEdit()
@alertException
def createChunks(self):
panel = GeneratorPanel()
col = [panel]
label = Label("Create chunks using the settings above? This cannot be undone.")
col.append(Row([Label("")]))
col.append(label)
col = Column(col)
if Dialog(client=col, responses=["OK", "Cancel"]).present() == "Cancel":
return
chunks = self.selectedChunks()
createChunks = panel.generate(self.editor.level, chunks)
try:
with setWindowCaption("CREATING - "):
showProgress("Creating {0} chunks...".format(len(chunks)), createChunks, cancel=True)
except Exception, e:
traceback.print_exc()
alert("Failed to start the chunk generator. {0!r}".format(e))
finally:
self.editor.renderer.invalidateChunkMarkers()
self.editor.renderer.loadNearbyChunks()
@alertException
def repopChunks(self):
for cpos in self.selectedChunks():
try:
chunk = self.editor.level.getChunk(*cpos)
chunk.TerrainPopulated = False
except pymclevel.ChunkNotPresent:
continue
self.editor.renderer.invalidateChunks(self.selectedChunks(), layers=["TerrainPopulated"])
@alertException
def dontRepopChunks(self):
for cpos in self.selectedChunks():
try:
chunk = self.editor.level.getChunk(*cpos)
chunk.TerrainPopulated = True
except pymclevel.ChunkNotPresent:
continue
self.editor.renderer.invalidateChunks(self.selectedChunks(), layers=["TerrainPopulated"])
def mouseDown(self, *args):
return self.editor.selectionTool.mouseDown(*args)
def mouseUp(self, evt, *args):
self.editor.selectionTool.mouseUp(evt, *args)
def GeneratorPanel():
panel = Widget()
panel.chunkHeight = 64
panel.grass = True
panel.simulate = False
jarStorage = MCServerChunkGenerator.getDefaultJarStorage()
if jarStorage:
jarStorage.reloadVersions()
generatorChoice = ChoiceButton(["Minecraft Server", "Flatland"])
panel.generatorChoice = generatorChoice
col = [Row((Label("Generator:"), generatorChoice))]
noVersionsRow = Label("Will automatically download and use the latest version")
versionContainer = Widget()
heightinput = IntInputRow("Height: ", ref=AttrRef(panel, "chunkHeight"), min=0, max=128)
grassinput = CheckBoxLabel("Grass", ref=AttrRef(panel, "grass"))
flatPanel = Column([heightinput, grassinput], align="l")
def generatorChoiceChanged():
serverPanel.visible = generatorChoice.selectedChoice == "Minecraft Server"
flatPanel.visible = not serverPanel.visible
generatorChoice.choose = generatorChoiceChanged
versionChoice = None
if len(jarStorage.versions):
def checkForUpdates():
def _check():
yield
jarStorage.downloadCurrentServer()
yield
showProgress("Checking for server updates...", _check())
versionChoice.choices = sorted(jarStorage.versions, reverse=True)
versionChoice.choiceIndex = 0
versionChoice = ChoiceButton(sorted(jarStorage.versions, reverse=True))
versionChoiceRow = (Row((
Label("Server version:"),
versionChoice,
Label("or"),
Button("Check for Updates", action=checkForUpdates))))
panel.versionChoice = versionChoice
versionContainer.add(versionChoiceRow)
else:
versionContainer.add(noVersionsRow)
versionContainer.shrink_wrap()
menu = Menu("Advanced", [
("Open Server Storage", "revealStorage"),
("Reveal World Cache", "revealCache"),
("Delete World Cache", "clearCache")
])
def presentMenu():
i = menu.present(advancedButton.parent, advancedButton.topleft)
if i != -1:
(revealStorage, revealCache, clearCache)[i]()
advancedButton = Button("Advanced...", presentMenu)
@alertException
def revealStorage():
mcplatform.platform_open(jarStorage.cacheDir)
@alertException
def revealCache():
mcplatform.platform_open(MCServerChunkGenerator.worldCacheDir)
#revealCacheRow = Row((Label("Minecraft Server Storage: "), Button("Open Folder", action=revealCache, tooltipText="Click me to install your own minecraft_server.jar if you have any.")))
@alertException
def clearCache():
MCServerChunkGenerator.clearWorldCache()
simRow = CheckBoxLabel("Simulate world", ref=AttrRef(panel, "simulate"), tooltipText="Simulate the world for a few seconds after generating it. Reduces the save file size by processing all of the TileTicks.")
simRow = Row((simRow, advancedButton), anchor="lrh")
#deleteCacheRow = Row((Label("Delete Temporary World File Cache?"), Button("Delete Cache!", action=clearCache, tooltipText="Click me if you think your chunks are stale.")))
serverPanel = Column([versionContainer, simRow, ], align="l")
col.append(serverPanel)
col = Column(col, align="l")
col.add(flatPanel)
flatPanel.topleft = serverPanel.topleft
flatPanel.visible = False
panel.add(col)
panel.shrink_wrap()
def generate(level, arg):
useServer = generatorChoice.selectedChoice == "Minecraft Server"
if useServer:
def _createChunks():
if versionChoice:
version = versionChoice.selectedChoice
else:
version = None
gen = MCServerChunkGenerator(version=version)
if isinstance(arg, pymclevel.BoundingBox):
for i in gen.createLevelIter(level, arg, simulate=panel.simulate):
yield i
else:
for i in gen.generateChunksInLevelIter(level, arg, simulate=panel.simulate):
yield i
else:
def _createChunks():
height = panel.chunkHeight
grass = panel.grass and pymclevel.alphaMaterials.Grass.ID or pymclevel.alphaMaterials.Dirt.ID
if isinstance(arg, pymclevel.BoundingBox):
chunks = list(arg.chunkPositions)
else:
chunks = arg
if level.dimNo in (-1, 1):
maxskylight = 0
else:
maxskylight = 15
for i, (cx, cz) in enumerate(chunks):
yield i, len(chunks)
#surface = blockInput.blockInfo
#for cx, cz in :
try:
level.createChunk(cx, cz)
except ValueError, e: # chunk already present
print e
continue
else:
ch = level.getChunk(cx, cz)
if height > 0:
stoneHeight = max(0, height - 5)
grassHeight = max(0, height - 1)
ch.Blocks[:, :, grassHeight] = grass
ch.Blocks[:, :, stoneHeight:grassHeight] = pymclevel.alphaMaterials.Dirt.ID
ch.Blocks[:, :, :stoneHeight] = pymclevel.alphaMaterials.Stone.ID
ch.Blocks[:, :, 0] = pymclevel.alphaMaterials.Bedrock.ID
ch.SkyLight[:, :, height:] = maxskylight
if maxskylight:
ch.HeightMap[:] = height
else:
ch.SkyLight[:] = maxskylight
ch.needsLighting = False
ch.dirty = True
return _createChunks()
panel.generate = generate
return panel
|
isc
|
6f969e31dd1114cfb3746750d22d8213
| 36.319767
| 212
| 0.595731
| 4.19086
| false
| false
| false
| false
|
mozilla-services/tecken
|
systemtests/bin/make-symbols-zip.py
|
1
|
5995
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
# Download SYM files and put them into a ZIP file for testing upload with.
#
# Usage: ./bin/make-symbols-zip.py [OUTPUTDIR]
import datetime
import os
import shutil
import tempfile
from urllib.parse import urljoin
import zipfile
import click
import requests
# Number of seconds to wait for a response from server
CONNECTION_TIMEOUT = 60
SYMBOLS_URL = "https://symbols.mozilla.org/"
def get_sym_files(auth_token, url, start_page):
"""Given an auth token, generates filenames and sizes for SYM files.
:param auth_token: auth token for symbols.mozilla.org
:param url: url for file uploads
:param start_page: the page of files to start with
    :returns: generator of (key, size) tuples
"""
sym_files = []
page = start_page
params = {"page": start_page}
headers = {"auth-token": auth_token, "User-Agent": "tecken-systemtests"}
while True:
if sym_files:
yield sym_files.pop(0)
else:
params["page"] = page
resp = requests.get(
url,
params=params,
headers=headers,
timeout=CONNECTION_TIMEOUT,
)
resp.raise_for_status()
data = resp.json()
sym_files = [(record["key"], record["size"]) for record in data["files"]]
page += 1
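# Illustrative use of the generator above (not part of the original script);
# the auth token value is a placeholder:
#
#     files_url = urljoin(SYMBOLS_URL, "/api/uploads/files/")
#     for key, size in get_sym_files("my-auth-token", files_url, 1):
#         print(key, size)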
def build_zip_file(zip_filename, sym_dir):
"""Generates a ZIP file of contents of sym dir.
:param zip_filename: full path to zip file
:param sym_dir: full path to directory of SYM files
    :returns: None; the archive is written to zip_filename
"""
# Create zip file
with zipfile.ZipFile(zip_filename, mode="w") as fp:
for root, dirs, files in os.walk(sym_dir):
if not files:
continue
for sym_file in files:
full_path = os.path.join(root, sym_file)
arcname = full_path[len(sym_dir) + 1 :]
fp.write(
full_path,
arcname=arcname,
compress_type=zipfile.ZIP_DEFLATED,
)
def download_sym_file(url, sym_file):
"""Download SYM file into sym_dir."""
headers = {"User-Agent": "tecken-systemtests"}
resp = requests.get(url, headers=headers, timeout=CONNECTION_TIMEOUT)
if resp.status_code != 200:
return
dirname = os.path.dirname(sym_file)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(sym_file, "wb") as fp:
fp.write(resp.content)
def get_size(filename):
"""Get the size of a file.
:param filename: the filename to check
:returns: 0 if the file doesn't exist; file size otherwise
"""
if not os.path.exists(filename):
return 0
return os.stat(filename).st_size
@click.command()
@click.option(
"--auth-token",
required=True,
help="Auth token for symbols.mozilla.org.",
)
@click.option(
"--start-page",
default=1,
type=int,
help="Page of SYM files to start with.",
)
@click.option(
"--max-size",
default=10_000_000,
type=int,
help="Max size in bytes resulting ZIP file can't exceed.",
)
@click.argument("outputdir")
@click.pass_context
def make_symbols_zip(ctx, auth_token, start_page, max_size, outputdir):
"""
Builds a zip file of SYM files recently uploaded to symbols.mozilla.org.
Note: This requires an auth token for symbols.mozilla.org to view files.
"""
# Figure out the ZIP file name and final path
zip_filename = datetime.datetime.now().strftime("symbols_%Y%m%d_%H%M%S.zip")
zip_path = os.path.join(outputdir, zip_filename)
click.echo(f"Generating ZIP file {zip_path} ...")
with tempfile.TemporaryDirectory(prefix="symbols") as tmpdirname:
sym_dir = os.path.join(tmpdirname, "syms")
tmp_zip_path = os.path.join(tmpdirname, zip_filename)
sym_files_url = urljoin(SYMBOLS_URL, "/api/uploads/files/")
sym_files_generator = get_sym_files(auth_token, sym_files_url, start_page)
for sym_filename, sym_size in sym_files_generator:
if sym_filename.endswith(".0"):
# Skip these because there aren't SYM files for them.
continue
is_try = False
if os.path.exists(tmp_zip_path):
# See if the new zip file is too big; if it is, we're done!
zip_size = os.stat(tmp_zip_path).st_size
click.echo(f"size: {zip_size:,}, max_size: {max_size:,}")
if zip_size > max_size:
# Handle weird case where the first zip file we built was
# too big--just use that.
if not os.path.exists(zip_path):
shutil.copy(tmp_zip_path, zip_path)
break
# This zip file isn't too big, so copy it over.
shutil.copy(tmp_zip_path, zip_path)
click.echo(
click.style(f"Adding {sym_filename} ({sym_size:,}) ...", fg="yellow")
)
# Download SYM file into temporary directory
if sym_filename.startswith("try/"):
sym_filename = sym_filename[4:]
is_try = True
if sym_filename.startswith("v1/"):
sym_filename = sym_filename[3:]
url = urljoin(SYMBOLS_URL, sym_filename)
if is_try:
url = url + "?try"
sym_file = os.path.join(sym_dir, sym_filename)
download_sym_file(url, sym_file)
# Build the new zip file
build_zip_file(tmp_zip_path, sym_dir)
zip_size = os.stat(zip_path).st_size
click.echo(f"Completed {zip_path} ({zip_size:,})!")
if __name__ == "__main__":
make_symbols_zip()
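# Example invocation (illustrative; only the flags defined above are used and
# the token is a placeholder):
#
#     ./bin/make-symbols-zip.py --auth-token=XXXX --max-size=10000000 ./zips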
|
mpl-2.0
|
a70246809efc67456c7672da4f6377db
| 28.975
| 85
| 0.584821
| 3.744535
| false
| false
| false
| false
|
mozilla-services/tecken
|
systemtests/bin/fetch-crashids.py
|
1
|
2558
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
# Fetch crash ids for Firefox nightly from Crash Stats.
#
# Usage: ./bin/fetch-crashids.py
from urllib.parse import urljoin
import click
import requests
CRASHSTATS = "https://crash-stats.mozilla.org/"
MAX_PAGE = 1000
# Indicators that the crash report probably doesn't have a good stack for
# symbolication
MEH_INDICATORS = [
"IPCError",
".dll",
".so",
]
def is_meh(signature):
for indicator in MEH_INDICATORS:
if indicator in signature:
return True
return False
def fetch_supersearch(url, params):
headers = {"User-Agent": "tecken-systemtests"}
# Set up first page
params["_results_offset"] = 0
params["_results_number"] = MAX_PAGE
crashids_count = 0
while True:
resp = requests.get(url=url, params=params, headers=headers)
hits = resp.json()["hits"]
        yield from hits
        crashids_count += len(hits)
# If there are no more crash ids to get, we return
total = resp.json()["total"]
if not hits or crashids_count >= total:
return
# Get the next page, but only as many results as we need
params["_results_offset"] += MAX_PAGE
params["_results_number"] = min(
# MAX_PAGE is the maximum we can request
MAX_PAGE,
            # The number of results Super Search has that it
            # hasn't returned to us so far
total - crashids_count,
)
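# Illustrative use of the pager above (not part of the original script); the
# parameters mirror the ones fetch_crashids() builds below:
#
#     url = urljoin(CRASHSTATS, "/api/SuperSearch/")
#     params = {"product": "Firefox", "_columns": ["uuid", "signature"]}
#     for hit in fetch_supersearch(url, params):
#         print(hit["uuid"])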
@click.command()
@click.option(
"--debug/--no-debug",
default=False,
help="Show debug output.",
)
@click.option(
"--num-results",
default=10,
type=int,
help="Number of crash ids to return.",
)
@click.pass_context
def fetch_crashids(ctx, debug, num_results):
params = {
"product": "Firefox",
"release_channel": "nightly",
"_columns": ["uuid", "signature"],
"_sort": ["-date"],
}
url = urljoin(CRASHSTATS, "/api/SuperSearch/")
crashids = 0
for result in fetch_supersearch(url=url, params=params):
# Skip crash reports that probably have meh stacks
if is_meh(result["signature"]):
continue
if debug:
print(result)
else:
print(result["uuid"])
crashids += 1
        if crashids >= num_results:
break
if __name__ == "__main__":
fetch_crashids()
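# Example invocations (illustrative; only the flags defined above are used):
#
#     ./bin/fetch-crashids.py --num-results=25
#     ./bin/fetch-crashids.py --debug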
|
mpl-2.0
|
2ccfe33228ead32de7e4c4b5aba8a254
| 23.361905
| 73
| 0.60086
| 3.800892
| false
| false
| false
| false
|
mozilla-services/tecken
|
tecken/tests/test_download.py
|
1
|
29954
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import csv
import datetime
from io import StringIO
import json
import os
from unittest import mock
from urllib.parse import urlparse
import pytest
from django.core.management import call_command
from django.urls import reverse
from django.utils import timezone
from tecken.base.symboldownloader import SymbolDownloader
from tecken.download import views
from tecken.download.models import MissingSymbol
_here = os.path.dirname(__file__)
def reload_downloaders(urls, try_downloader=None):
"""Because the tecken.download.views module has a global instance
of SymbolDownloader created at start-up, it's impossible to easily
change the URL if you want to test clients with a different URL.
This function hotfixes that instance to use a different URL(s).
"""
if isinstance(urls, str):
urls = tuple([urls])
views.normal_downloader = SymbolDownloader(urls)
if try_downloader:
views.try_downloader = SymbolDownloader([try_downloader])
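# Illustrative call (not part of the original tests): point the module-level
# downloaders at a private bucket plus a "try" bucket, mirroring the URLs used
# by the tests below.
#
#     reload_downloaders(
#         "https://s3.example.com/private",
#         try_downloader="https://s3.example.com/private/trying",
#     )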
def test_client_happy_path(client, botomock):
reload_downloaders("https://s3.example.com/private/prefix/")
def mock_api_call(self, operation_name, api_params):
assert operation_name == "ListObjectsV2"
return {"Contents": [{"Key": api_params["Prefix"]}]}
url = reverse(
"download:download_symbol",
args=("xul.pdb", "44E4EC8C2F41492B9369D6B9A059577C2", "xul.sym"),
)
with botomock(mock_api_call):
response = client.get(url)
assert response.status_code == 302
parsed = urlparse(response["location"])
assert parsed.netloc == "s3.example.com"
# the pre-signed URL will have the bucket in the path
assert parsed.path == (
"/private/prefix/v0/" "xul.pdb/44E4EC8C2F41492B9369D6B9A059577C2/xul.sym"
)
assert "Signature=" in parsed.query
assert "Expires=" in parsed.query
assert "AWSAccessKeyId=" in parsed.query
response = client.head(url)
assert response.status_code == 200
assert response.content == b""
assert response["Access-Control-Allow-Origin"] == "*"
assert response["Access-Control-Allow-Methods"] == "GET"
def test_client_legacy_product_prefix(client, botomock):
reload_downloaders("https://s3.example.com/private/prefix/")
def mock_api_call(self, operation_name, api_params):
assert operation_name == "ListObjectsV2"
return {"Contents": [{"Key": api_params["Prefix"]}]}
url = reverse(
"download:download_symbol_legacy",
args=("firefox", "xul.pdb", "44E4EC8C2F41492B9369D6B9A059577C2", "xul.sym"),
)
with botomock(mock_api_call):
response = client.get(url)
assert response.status_code == 302
parsed = urlparse(response["location"])
assert parsed.netloc == "s3.example.com"
# the pre-signed URL will have the bucket in the path
assert parsed.path == (
"/private/prefix/v0/" "xul.pdb/44E4EC8C2F41492B9369D6B9A059577C2/xul.sym"
)
with botomock(mock_api_call):
response = client.head(url)
assert response.status_code == 200
assert response.content == b""
assert response["Access-Control-Allow-Origin"] == "*"
assert response["Access-Control-Allow-Methods"] == "GET"
# But if you try to mess with the prefix to something NOT
# recognized, it should immediately 404.
url = reverse(
"download:download_symbol_legacy",
args=("gobblygook", "xul.pdb", "44E4EC8C2F41492B9369D6B9A059577C2", "xul.sym"),
)
with botomock(mock_api_call):
response = client.get(url)
assert response.status_code == 404
@pytest.mark.django_db
def test_client_try_download(client, botomock):
"""Suppose there's a file that doesn't exist in any of the
settings.SYMBOL_URLS but does exist in settings.UPLOAD_TRY_SYMBOLS_URL,
then to reach that file you need to use ?try on the URL.
"""
reload_downloaders(
"https://s3.example.com/private",
try_downloader="https://s3.example.com/private/trying",
)
mock_calls = []
def mock_api_call(self, operation_name, api_params):
assert operation_name == "ListObjectsV2"
mock_calls.append(api_params)
if api_params["Prefix"].startswith("trying/v0/"):
# Yeah, we have it
return {"Contents": [{"Key": api_params["Prefix"]}]}
elif api_params["Prefix"].startswith("v0"):
            # Pretend nothing was returned, i.e. 404
return {}
else:
raise NotImplementedError(api_params["Prefix"])
url = reverse(
"download:download_symbol",
args=("xul.pdb", "44E4EC8C2F41492B9369D6B9A059577C2", "xul.sym"),
)
try_url = reverse(
"download:download_symbol_try",
args=("xul.pdb", "44E4EC8C2F41492B9369D6B9A059577C2", "xul.sym"),
)
with botomock(mock_api_call):
response = client.get(url)
assert response.status_code == 404
assert len(mock_calls) == 1
response = client.get(try_url)
assert response.status_code == 302
assert len(mock_calls) == 2
# Also note that the headers are the same as for regular downloads
assert response["Access-Control-Allow-Origin"] == "*"
# And like regular download, you're only allowed to use GET or HEAD
response = client.put(try_url)
assert response.status_code == 405
# And calling it with DEBUG header should return a header with
# some debug info. Just like regular download.
response = client.get(try_url, HTTP_DEBUG="true")
assert response.status_code == 302
assert float(response["debug-time"]) > 0
# You can also use the regular URL but add ?try to the URL
response = client.get(url, {"try": True})
assert response.status_code == 302
assert len(mock_calls) == 2
# Do it again, to make sure the caches work in our favor
response = client.get(url)
assert response.status_code == 404
assert len(mock_calls) == 2
response = client.get(try_url)
assert response.status_code == 302
assert len(mock_calls) == 2
def test_client_with_debug(client, botomock):
reload_downloaders("https://s3.example.com/private/prefix/")
def mock_api_call(self, operation_name, api_params):
assert operation_name == "ListObjectsV2"
if api_params["Prefix"].endswith("xil.sym"):
return {"Contents": []}
elif api_params["Prefix"].endswith("xul.sym"):
return {"Contents": [{"Key": api_params["Prefix"]}]}
else:
raise NotImplementedError(api_params)
url = reverse(
"download:download_symbol",
args=("xul.pdb", "44E4EC8C2F41492B9369D6B9A059577C2", "xul.sym"),
)
with botomock(mock_api_call):
response = client.get(url, HTTP_DEBUG="true")
assert response.status_code == 302
parsed = urlparse(response["location"])
assert float(response["debug-time"]) > 0
assert parsed.netloc == "s3.example.com"
# the pre-signed URL will have the bucket in the path
assert parsed.path == (
"/private/prefix/v0/" "xul.pdb/44E4EC8C2F41492B9369D6B9A059577C2/xul.sym"
)
assert "Signature=" in parsed.query
assert "Expires=" in parsed.query
assert "AWSAccessKeyId=" in parsed.query
response = client.head(url, HTTP_DEBUG="true")
assert response.status_code == 200
assert response.content == b""
assert float(response["debug-time"]) > 0
# This one won't be logged because the filename is on a block list
# of symbol filenames to ignore
ignore_url = reverse(
"download:download_symbol",
args=("cxinjime.pdb", "342D9B0A3AE64812A2388C055C9F6C321", "file.ptr"),
)
response = client.get(ignore_url, HTTP_DEBUG="true")
assert response.status_code == 404
assert float(response["debug-time"]) == 0.0
# Do a GET with a file that doesn't exist.
not_found_url = reverse(
"download:download_symbol",
args=("xil.pdb", "55F4EC8C2F41492B9369D6B9A059577A1", "xil.sym"),
)
response = client.get(not_found_url, HTTP_DEBUG="true")
assert response.status_code == 404
assert float(response["debug-time"]) > 0
def test_client_with_ignorable_file_extensions(client, botomock):
def mock_api_call(self, operation_name, api_params):
assert False, "This mock function shouldn't be called"
url = reverse(
"download:download_symbol",
args=(
"xul.pdb",
"44E4EC8C2F41492B9369D6B9A059577C2",
# Note! This is NOT in the settings.DOWNLOAD_FILE_EXTENSIONS_ALLOWED
# list.
"xul.xxx",
),
)
with botomock(mock_api_call):
response = client.get(url)
assert response.status_code == 404
def test_client_with_debug_with_cache(client, botomock):
reload_downloaders("https://s3.example.com/private/prefix/")
mock_api_calls = []
def mock_api_call(self, operation_name, api_params):
assert operation_name == "ListObjectsV2"
mock_api_calls.append(api_params)
return {"Contents": [{"Key": api_params["Prefix"]}]}
url = reverse(
"download:download_symbol",
args=("xul.pdb", "44E4EC8C2F41492B9369D6B9A059577C2", "xul.sym"),
)
with botomock(mock_api_call):
response = client.get(url, HTTP_DEBUG="true")
assert response.status_code == 302
assert float(response["debug-time"]) > 0
response = client.get(url, HTTP_DEBUG="true")
assert response.status_code == 302
assert float(response["debug-time"]) > 0
response = client.head(url, HTTP_DEBUG="true")
assert response.status_code == 200
assert float(response["debug-time"]) > 0
assert len(mock_api_calls) == 1
def test_client_with_cache_refreshed(client, botomock):
reload_downloaders("https://s3.example.com/private/prefix/")
mock_api_calls = []
def mock_api_call(self, operation_name, api_params):
assert operation_name == "ListObjectsV2"
mock_api_calls.append(api_params)
return {"Contents": [{"Key": api_params["Prefix"]}]}
url = reverse(
"download:download_symbol",
args=("xul.pdb", "44E4EC8C2F41492B9369D6B9A059577C2", "xul.sym"),
)
with botomock(mock_api_call):
response = client.get(url)
assert response.status_code == 302
assert len(mock_api_calls) == 1
response = client.get(url)
assert response.status_code == 302
assert len(mock_api_calls) == 1 # still 1
response = client.get(url, {"_refresh": 1})
assert response.status_code == 302
assert len(mock_api_calls) == 2
def test_client_404(client, botomock):
reload_downloaders("https://s3.example.com/private/prefix/")
def mock_api_call(self, operation_name, api_params):
assert operation_name == "ListObjectsV2"
return {}
url = reverse(
"download:download_symbol",
args=("xul.pdb", "44E4EC8C2F41492B9369D6B9A059577C2", "xul.sym"),
)
with botomock(mock_api_call):
response = client.get(url)
assert response.status_code == 404
assert "Symbol Not Found" in response.content.decode("utf-8")
response = client.head(url)
assert response.status_code == 404
@pytest.mark.django_db
def test_client_404_logged(client, botomock, settings):
reload_downloaders("https://s3.example.com/private/prefix/")
settings.ENABLE_STORE_MISSING_SYMBOLS = True
def mock_api_call(self, operation_name, api_params):
assert operation_name == "ListObjectsV2"
return {}
url = reverse(
"download:download_symbol",
args=("xul.pdb", "44E4EC8C2F41492B9369D6B9A059577C2", "xul.sym"),
)
with botomock(mock_api_call):
assert client.get(url).status_code == 404
assert client.get(url).status_code == 404
# This one won't be logged because it's a HEAD
assert client.head(url).status_code == 404
# This one won't be logged because the filename is on a block list
# of symbol filenames to ignore
ignore_url = reverse(
"download:download_symbol",
args=("cxinjime.pdb", "342D9B0A3AE64812A2388C055C9F6C321", "file.ptr"),
)
response = client.get(ignore_url)
assert response.status_code == 404
assert response.content == b"Symbol Not Found (and ignored)"
# This one won't be logged either
ignore_url = reverse(
"download:download_symbol",
args=("cxinjime.pdb", "000000000000000000000000000000000", "cxinjime.sym"),
)
response = client.get(ignore_url)
assert response.status_code == 404
assert response.content == b"Symbol Not Found (and ignored)"
# This "should" have logged the missing symbols twice.
# Actually it shouldn't log it twice because the work on logging
# missing symbols is guarded by a memoizer that prevents it from
    # executing more than once per set of arguments.
assert MissingSymbol.objects.all().count() == 1
assert MissingSymbol.objects.get(
symbol="xul.pdb",
debugid="44E4EC8C2F41492B9369D6B9A059577C2",
filename="xul.sym",
code_file__isnull=True,
code_id__isnull=True,
)
# Now look it up with ?code_file= and ?code_id= etc.
assert client.get(url, {"code_file": "xul.dll"}).status_code == 404
assert client.get(url, {"code_id": "deadbeef"}).status_code == 404
# both
assert (
client.get(url, {"code_file": "xul.dll", "code_id": "deadbeef"}).status_code
== 404
)
assert MissingSymbol.objects.all().count() == 4
assert MissingSymbol.objects.get(
symbol="xul.pdb",
debugid="44E4EC8C2F41492B9369D6B9A059577C2",
filename="xul.sym",
# The one with both set to something.
code_file="xul.dll",
code_id="deadbeef",
)
@pytest.mark.django_db
def test_client_404_logged_bad_code_file(client, botomock, settings):
"""The root of this test is to test something that's been observed
to happen in production; query strings for missing symbols with
values that contain URL encoded nullbytes (%00).
"""
reload_downloaders("https://s3.example.com/private/prefix/")
settings.ENABLE_STORE_MISSING_SYMBOLS = True
def mock_api_call(self, operation_name, api_params):
assert operation_name == "ListObjectsV2"
return {}
url = reverse(
"download:download_symbol",
args=("xul.pdb", "44E4EC8C2F41492B9369D6B9A059577C2", "xul.sym"),
)
with botomock(mock_api_call):
params = {"code_file": "\x00"}
assert client.head(url, params).status_code == 404
assert client.get(url, params).status_code == 400
# It won't get logged
assert not MissingSymbol.objects.all().exists()
# Same thing to happen if the 'code_id' contains nullbytes
params = {"code_id": "Nice\x00Try"}
assert client.head(url, params).status_code == 404
assert client.get(url, params).status_code == 400
assert not MissingSymbol.objects.all().exists()
def test_log_symbol_get_404_metrics(metricsmock):
views.log_symbol_get_404(
"xul.pdb",
"44E4EC8C2F41492B9369D6B9A059577C2",
"xul.sym",
code_file="",
code_id="",
)
records = metricsmock.get_records()
assert len(records) == 1
# Call it again with the exact same parameters
views.log_symbol_get_404(
"xul.pdb",
"44E4EC8C2F41492B9369D6B9A059577C2",
"xul.sym",
code_file="",
code_id="",
)
records = metricsmock.get_records()
assert len(records) == 1 # unchanged
# change one parameter slightly
views.log_symbol_get_404(
"xul.pdb",
"44E4EC8C2F41492B9369D6B9A059577C2",
"xul.sym",
code_file="",
code_id="deadbeef",
)
records = metricsmock.get_records()
assert len(records) == 2 # changed
@pytest.mark.django_db
def test_missingsymbols_csv(client, settings):
settings.ENABLE_STORE_MISSING_SYMBOLS = True
url = reverse("download:missingsymbols_csv")
response = client.get(url)
assert response.status_code == 200
assert response["Content-type"] == "text/csv"
today = timezone.now()
yesterday = today - datetime.timedelta(days=1)
expect_filename = yesterday.strftime("missing-symbols-%Y-%m-%d.csv")
assert expect_filename in response["Content-Disposition"]
lines = response.content.splitlines()
assert lines == [b"debug_file,debug_id,code_file,code_id"]
# Log at least one line
views.log_symbol_get_404(
"xul.pdb",
"44E4EC8C2F41492B9369D6B9A059577C2",
"xul.sym",
code_file="xul.dll",
code_id="deadbeef",
)
views.log_symbol_get_404(
"rooksdol_x64.dll",
"58B6E33D262000",
"rooksdol_x64.dl_",
code_file="",
code_id="",
)
# It's empty because it reports for yesterday, but we made the
# only log today.
response = client.get(url)
assert response.status_code == 200
content = response.content.decode("utf-8")
reader = csv.reader(StringIO(content))
lines_of_lines = list(reader)
assert len(lines_of_lines) == 2
line = lines_of_lines[1]
assert line[0] == "xul.pdb"
assert line[1] == "44E4EC8C2F41492B9369D6B9A059577C2"
assert line[2] == "xul.dll"
assert line[3] == "deadbeef"
@pytest.mark.django_db
def test_missingsymbols(client, settings):
settings.ENABLE_STORE_MISSING_SYMBOLS = True
# Empty db works fine
url = reverse("download:missingsymbols")
response = client.get(url)
assert response.status_code == 200
expected = {
"batch_size": 100,
"records": [],
"order_by": {"reverse": True, "sort": "modified_at"},
"page": 1,
"total_count": 0,
}
assert json.loads(response.content.decode("utf-8")) == expected
today = timezone.now()
yesterday = today - datetime.timedelta(days=1)
# Add a couple of missing symbols and set modified_at and created_at
# correctly
views.log_symbol_get_404(
"xul.pdb",
"44E4EC8C2F41492B9369D6B9A059577C2",
"xul.sym",
code_file="xul.dll",
code_id="deadbeef",
)
date_1 = yesterday.replace(hour=1, minute=1, second=1, microsecond=0)
MissingSymbol.objects.filter(symbol="xul.pdb").update(
modified_at=date_1, created_at=date_1
)
date_1_str = date_1.strftime("%Y-%m-%dT%H:%M:%SZ")
views.log_symbol_get_404(
"rooksdol_x64.dll",
"58B6E33D262000",
"rooksdol_x64.dl_",
code_file="",
code_id="",
)
date_2 = yesterday.replace(hour=2, minute=1, second=1, microsecond=0)
MissingSymbol.objects.filter(symbol="rooksdol_x64.dll").update(
modified_at=date_2, created_at=date_2
)
date_2_str = date_2.strftime("%Y-%m-%dT%H:%M:%SZ")
response = client.get(url)
assert response.status_code == 200
data = json.loads(response.content.decode("utf-8"))
expected = {
"batch_size": 100,
"order_by": {"reverse": True, "sort": "modified_at"},
"page": 1,
"records": [
{
"id": mock.ANY,
"code_file": None,
"code_id": None,
"count": 1,
"created_at": date_2_str,
"debugid": "58B6E33D262000",
"filename": "rooksdol_x64.dl_",
"modified_at": date_2_str,
"symbol": "rooksdol_x64.dll",
},
{
"id": mock.ANY,
"code_file": "xul.dll",
"code_id": "deadbeef",
"count": 1,
"created_at": date_1_str,
"debugid": "44E4EC8C2F41492B9369D6B9A059577C2",
"filename": "xul.sym",
"modified_at": date_1_str,
"symbol": "xul.pdb",
},
],
"total_count": 2,
}
assert data == expected
@pytest.mark.django_db
def test_store_missing_symbol_happy_path(metricsmock):
views.store_missing_symbol("foo.pdb", "ABCDEF12345", "foo.sym")
missing_symbol = MissingSymbol.objects.get(
symbol="foo.pdb",
debugid="ABCDEF12345",
filename="foo.sym",
code_file__isnull=True,
code_id__isnull=True,
)
assert missing_symbol.hash
assert missing_symbol.count == 1
first_modified_at = missing_symbol.modified_at
# Repeat and it should increment
views.store_missing_symbol("foo.pdb", "ABCDEF12345", "foo.sym")
missing_symbol.refresh_from_db()
assert missing_symbol.count == 2
assert missing_symbol.modified_at > first_modified_at
records = metricsmock.get_records()
assert len(records) == 2
assert records[0].key == "tecken.download_store_missing_symbol"
assert records[1].key == "tecken.download_store_missing_symbol"
# This time with a code_file and code_id
views.store_missing_symbol(
"foo.pdb",
"ABCDEF12345",
"foo.sym",
code_file="libsystem_pthread.dylib",
code_id="id",
)
second_missing_symbol = MissingSymbol.objects.get(
symbol="foo.pdb",
debugid="ABCDEF12345",
filename="foo.sym",
code_file="libsystem_pthread.dylib",
code_id="id",
)
assert second_missing_symbol.hash != missing_symbol.hash
assert second_missing_symbol.count == 1
@pytest.mark.django_db
def test_store_missing_symbol_skips():
    # If the symbol, debugid, or filename is too long, nothing is stored
views.store_missing_symbol("x" * 200, "ABCDEF12345", "foo.sym")
views.store_missing_symbol("foo.pdb", "x" * 200, "foo.sym")
views.store_missing_symbol("foo.pdb", "ABCDEF12345", "x" * 200)
assert not MissingSymbol.objects.all().exists()
@pytest.mark.django_db
def test_store_missing_symbol_skips_bad_code_file_or_id():
# If the code_file or code_id is too long don't bother storing it.
views.store_missing_symbol("foo.pdb", "ABCDEF12345", "foo.sym", code_file="x" * 200)
views.store_missing_symbol("foo.pdb", "ABCDEF12345", "foo.sym", code_id="x" * 200)
assert not MissingSymbol.objects.all().exists()
@pytest.mark.django_db
def test_store_missing_symbol_client(client, botomock, settings):
settings.ENABLE_STORE_MISSING_SYMBOLS = True
reload_downloaders("https://s3.example.com/private/prefix/")
mock_calls = []
def mock_api_call(self, operation_name, api_params):
assert operation_name == "ListObjectsV2"
mock_calls.append(api_params["Prefix"])
return {}
url = reverse(
"download:download_symbol",
args=("foo.pdb", "44E4EC8C2F41492B9369D6B9A059577C2", "foo.ex_"),
)
with botomock(mock_api_call):
response = client.get(url, {"code_file": "something"})
assert response.status_code == 404
assert response.content == b"Symbol Not Found"
assert MissingSymbol.objects.all().count() == 1
# Pretend we're excessively eager
response = client.get(url, {"code_file": "something"})
assert response.status_code == 404
assert response.content == b"Symbol Not Found"
# This basically checks that the SymbolDownloader cache is
# not invalidated between calls.
assert len(mock_calls) == 1
# However, the act of triggering that
# store_missing_symbol() call is guarded by a
# cache. So it shouldn't have called it more than
# once.
assert MissingSymbol.objects.filter(count=1).count() == 1
def test_client_with_bad_filenames(client, botomock):
reload_downloaders("https://s3.example.com/private/prefix/")
def mock_api_call(self, operation_name, api_params):
assert operation_name == "ListObjectsV2"
return {"Contents": []}
with botomock(mock_api_call):
url = reverse(
"download:download_symbol",
args=(
"xül.pdb", # <-- note the extended ascii char
"44E4EC8C2F41492B9369D6B9A059577C2",
"xul.sym",
),
)
response = client.get(url)
assert response.status_code == 400
url = reverse(
"download:download_symbol",
args=(
"x%l.pdb", # <-- note the %
"44E4EC8C2F41492B9369D6B9A059577C2",
"xul.sym",
),
)
response = client.get(url)
assert response.status_code == 400
url = reverse(
"download:download_symbol",
args=(
"xul.pdb",
"44E4EC8C2F41492B9369D6B9A059577C2",
"xul#.ex_", # <-- note the #
),
)
response = client.get(url)
assert response.status_code == 400
url = reverse(
"download:download_symbol",
args=(
"crypt3\x10.pdb",
"3D0443BF4FF5446B83955512615FD0942",
"crypt3\x10.pd_",
),
)
response = client.get(url)
assert response.status_code == 400
# There are many more characters that can cause a 400 response
# because the symbol or the filename contains, what's considered,
# invalid characters. But there are some that actually work
# that might be a bit surprising.
url = reverse(
"download:download_symbol",
args=("汉.pdb", "44E4EC8C2F41492B9369D6B9A059577C2", "xul.sym"),
)
response = client.get(url)
assert response.status_code == 404
url = reverse(
"download:download_symbol",
args=("foo.pdb", "44E4EC8C2F41492B9369D6B9A059577C2", "⚡️.sym"),
)
response = client.get(url)
assert response.status_code == 404
url = reverse(
"download:download_symbol",
args=(
"space in the filename.pdb",
"44E4EC8C2F41492B9369D6B9A059577C2",
"bar.ex_",
),
)
response = client.get(url)
assert response.status_code == 404
@pytest.mark.django_db
def test_cleanse_missingsymbol_delete_records():
"""cleanse_missingsymbol deletes appropriate records"""
today = timezone.now()
cutoff = today - datetime.timedelta(days=30)
# Create a record for today
MissingSymbol.objects.create(
hash="1",
symbol="xul.so",
debugid="1",
filename="xul.so",
)
# Create a record before the cutoff--since modified_at is an "auto_now"
# field, we need to mock time
with mock.patch("django.utils.timezone.now") as mock_now:
mock_now.return_value = cutoff + datetime.timedelta(days=1)
MissingSymbol.objects.create(
hash="2",
symbol="xul.so",
debugid="2",
filename="xul.so",
)
# Create a record after the cutoff
with mock.patch("django.utils.timezone.now") as mock_now:
mock_now.return_value = cutoff - datetime.timedelta(days=1)
MissingSymbol.objects.create(
hash="3",
symbol="xul.so",
debugid="3",
filename="xul.so",
)
for sym in MissingSymbol.objects.all():
print("1", sym, sym.hash, sym.modified_at)
stdout = StringIO()
call_command("cleanse_missingsymbol", dry_run=False, stdout=stdout)
output = stdout.getvalue()
assert "deleted missingsymbol=1" in output
# Verify that the record that was deleted was the old one
assert sorted(MissingSymbol.objects.values_list("hash", flat=True)) == ["1", "2"]
@pytest.mark.django_db
def test_cleanse_missingsymbol_delete_records_dry_run():
"""cleanse_missingsymbol dry-run doesn't delete records"""
today = timezone.now()
cutoff = today - datetime.timedelta(days=30)
# Create a record for today
MissingSymbol.objects.create(
hash="1",
symbol="xul.so",
debugid="1",
filename="xul.so",
)
# Create a record before the cutoff--since modified_at is an "auto_now"
# field, we need to mock time
with mock.patch("django.utils.timezone.now") as mock_now:
mock_now.return_value = cutoff + datetime.timedelta(days=1)
MissingSymbol.objects.create(
hash="2",
symbol="xul.so",
debugid="2",
filename="xul.so",
)
# Create a record after the cutoff
with mock.patch("django.utils.timezone.now") as mock_now:
mock_now.return_value = cutoff - datetime.timedelta(days=1)
MissingSymbol.objects.create(
hash="3",
symbol="xul.so",
debugid="3",
filename="xul.so",
)
for sym in MissingSymbol.objects.all():
print("1", sym, sym.hash, sym.modified_at)
stdout = StringIO()
call_command("cleanse_missingsymbol", dry_run=True, stdout=stdout)
output = stdout.getvalue()
assert "DRY RUN" in output
assert "deleted missingsymbol=1" in output
# Verify no records were deleted
assert sorted(MissingSymbol.objects.values_list("hash", flat=True)) == [
"1",
"2",
"3",
]
|
mpl-2.0
|
15568823811a3e3f7d80e58f284ce861
| 33.1082
| 88
| 0.609978
| 3.544024
| false
| false
| false
| false
|
mozilla-services/tecken
|
tecken/libdockerflow.py
|
1
|
2292
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import json
from pathlib import Path
from django.core import checks
from django.conf import settings
from tecken.storage import StorageBucket, StorageError
def check_storage_urls(app_configs, **kwargs):
errors = []
checked = []
def check_url(url, setting_key):
if url in checked:
return
bucket = StorageBucket(url)
if not bucket.private:
return
try:
if not bucket.exists():
errors.append(
checks.Error(
f"Unable to connect to {url} (bucket={bucket.name!r}), "
f"because bucket not found",
id="tecken.health.E001",
)
)
except StorageError as error:
errors.append(
checks.Error(
f"Unable to connect to {url} (bucket={bucket.name!r}), "
f"due to {error.backend_msg}",
id="tecken.health.E002",
)
)
else:
checked.append(url)
for url in settings.SYMBOL_URLS:
check_url(url, "SYMBOL_URLS")
for url in settings.UPLOAD_URL_EXCEPTIONS.values():
check_url(url, "UPLOAD_URL_EXCEPTIONS")
return errors
def get_version_info(basedir):
"""Returns version.json data from deploys"""
path = Path(basedir) / "version.json"
if not path.exists():
return {}
try:
data = path.read_text()
return json.loads(data)
except (OSError, json.JSONDecodeError):
return {}
def get_release_name(basedir):
"""Return a friendly name for the release that is running
This pulls version data and then returns the best version-y thing available: the
version, the commit, or "unknown" if there's no version data.
:returns: string
"""
version_info = get_version_info(basedir)
version = version_info.get("version", "none")
commit = version_info.get("commit")
commit = commit[:8] if commit else "unknown"
return f"{version}:{commit}"
|
mpl-2.0
|
6c1576853545fb8955959934b82bd9b7
| 28.384615
| 84
| 0.579843
| 4.15971
| false
| false
| false
| false
|
mozilla-services/tecken
|
tecken/librequests.py
|
1
|
2398
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
class HTTPAdapterWithTimeout(HTTPAdapter):
"""HTTPAdapter with a default timeout
This allows you to set a default timeout when creating the adapter.
It can be overridden here as well as when doing individual
requests.
:arg varies default_timeout: number of seconds before timing out
This can be a float or a (connect timeout, read timeout) tuple
of floats.
Defaults to 5.0 seconds.
"""
def __init__(self, *args, **kwargs):
self._default_timeout = kwargs.pop("default_timeout", 5.0)
super().__init__(*args, **kwargs)
def send(self, *args, **kwargs):
# If there's a timeout, use that. Otherwise, use the default.
kwargs["timeout"] = kwargs.get("timeout") or self._default_timeout
return super().send(*args, **kwargs)
def session_with_retries(
total_retries=5,
backoff_factor=0.2,
status_forcelist=(429, 500),
default_timeout=5.0,
):
"""Returns session that retries on HTTP 429 and 500 with default timeout
:arg int total_retries: total number of times to retry
:arg float backoff_factor: number of seconds to increment by between
attempts
For example, 0.1 will back off 0.1s, then 0.2s, then 0.3s, ...
:arg tuple of HTTP codes status_forcelist: tuple of HTTP codes to
retry on
:arg varies default_timeout: number of seconds before timing out
This can be a float or a (connect timeout, read timeout) tuple
of floats.
:returns: a requests Session instance
"""
retries = Retry(
total=total_retries,
backoff_factor=backoff_factor,
status_forcelist=list(status_forcelist),
)
session = requests.Session()
# Set the User-Agent header so we can distinguish our stuff from other stuff
session.headers.update({"User-Agent": "tecken-requests/1.0"})
adapter = HTTPAdapterWithTimeout(
max_retries=retries, default_timeout=default_timeout
)
session.mount("http://", adapter)
session.mount("https://", adapter)
return session
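# Usage sketch (the URL is a placeholder, not part of this module):
#
#     session = session_with_retries(total_retries=3, default_timeout=(3.05, 10.0))
#     response = session.get("https://example.com/api/health")
#     response.raise_for_status()
#
# A per-request timeout passed as session.get(..., timeout=...) still overrides
# the adapter's default.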
|
mpl-2.0
|
e7695c595ec1437f78202b47bcc7a420
| 29.35443
| 80
| 0.673478
| 4.071307
| false
| false
| false
| false
|
mozilla-services/tecken
|
bin/make-a-zip.py
|
1
|
2604
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import os
import shutil
import tempfile
import time
from pathlib import Path
from urllib.parse import urljoin
from zipfile import ZipFile
import requests
def main(symbols, out_file, remote_url):
def fmt_size(b):
if b < 1024 * 1024:
return f"{b / 1024:.1f}KB"
return f"{b / 1024 / 1024:.1f}MB"
with tempfile.TemporaryDirectory() as tmpdir:
tmp_out_file = Path(tmpdir) / Path("symbols-{}.zip".format(int(time.time())))
with ZipFile(tmp_out_file, "w") as zip_:
for symbol in symbols:
if symbol.count("/") == 1:
lib_filename = symbol.split("/")[0]
if lib_filename.endswith(".pdb"):
symbol_filename = lib_filename[:-4] + ".sym"
else:
symbol_filename = lib_filename + ".sym"
symbol += "/" + symbol_filename
url = urljoin(remote_url, symbol)
fn = Path(tmpdir) / Path(symbol)
fn.parent.mkdir(parents=True, exist_ok=True)
with requests.get(url) as response, open(fn, "wb") as f:
f.write(response.content)
raw_size = int(response.headers["content-length"])
print(
"Downloaded {} bytes ({}, {} on disk) into {}"
"".format(
raw_size,
fmt_size(raw_size),
fmt_size(os.stat(fn).st_size),
fn.parent,
)
)
zip_.write(fn, arcname=Path(symbol))
shutil.move(tmp_out_file, out_file)
print("Wrote", os.path.abspath(out_file))
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument("symbols", help="Symbols to download and include", nargs="*")
parser.add_argument(
"-o", "--out-file", help="ZIP file to create/update", default="symbols.zip"
)
parser.add_argument(
"-u",
"--remote-url",
help="URL to download symbols from",
default="https://symbols.mozilla.org",
)
args = parser.parse_args()
if not args.symbols:
print("Need at least 1 symbol", file=sys.stderr)
main(**vars(args))
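# Example invocation (the symbol key below is made up for illustration):
#
#     python bin/make-a-zip.py xul.pdb/ABCDEF1234567890ABCDEF1234567890A -o symbols.zip
#
# As implemented above, a "libname/debugid" argument containing a single slash
# gets the matching ".sym" filename appended automatically before downloading.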
|
mpl-2.0
|
a6f8c6d2013773dd5f6b3304c7c47cfb
| 33.263158
| 85
| 0.522657
| 4.139905
| false
| false
| false
| false
|
mozilla-services/tecken
|
tecken/base/decorators.py
|
1
|
6480
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import logging
from tempfile import TemporaryDirectory
from functools import wraps
from django import http
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.contrib.auth.decorators import permission_required, user_passes_test
logger = logging.getLogger("tecken")
def api_login_required(view_func):
"""similar to django.contrib.auth.decorators.login_required
except instead of redirecting it returns a 403 message if not
authenticated."""
@wraps(view_func)
def inner(request, *args, **kwargs):
if not request.user.is_active:
error_msg = "This requires an Auth-Token to authenticate the request"
if not settings.ENABLE_TOKENS_AUTHENTICATION: # pragma: no cover
error_msg += " (ENABLE_TOKENS_AUTHENTICATION is False)"
raise PermissionDenied(error_msg)
return view_func(request, *args, **kwargs)
return inner
def api_permission_required(perm):
"""Slight override on django.contrib.auth.decorators.permission_required
that forces the `raise_exception` to be set to True.
"""
return permission_required(perm, raise_exception=True)
def api_any_permission_required(*perms):
"""Allow the user through if the user has any of the provided
permissions. If none, raise a PermissionDenied error.
Also, unlike the django.contrib.auth.decorators.permission_required,
in this one we hardcode it to raise PermissionDenied if the
any-permission check fails.
"""
def check_perms(user):
# First check if the user has the permission (even anon users)
for perm in perms:
if user.has_perm(perm):
return True
raise PermissionDenied
return user_passes_test(check_perms)
def api_superuser_required(view_func):
"""Decorator that will return a 403 JSON response if the user
is *not* a superuser.
Use this decorator *after* others like api_login_required.
"""
@wraps(view_func)
def inner(request, *args, **kwargs):
if not request.user.is_superuser:
error_msg = "Must be superuser to access this view."
raise PermissionDenied(error_msg)
return view_func(request, *args, **kwargs)
return inner
def set_request_debug(view_func):
"""When you use this decorator, the request object gets changed.
The request gets a new boolean attribute set to either True or False
called `_debug_request` if and only if the request has a header
'HTTP_DEBUG' that is 'True', 'Yes' or '1' (case insensitive).
Usage:
@set_request_debug
def myview(request):
debug = request._request_debug
assert debug in (True, False)
return http.HttpResponse(debug)
"""
@wraps(view_func)
def wrapper(request, *args, **kwargs):
trueish = ("1", "true", "yes")
debug = request.META.get("HTTP_DEBUG", "").lower() in trueish
request._request_debug = debug
return view_func(request, *args, **kwargs)
return wrapper
class JsonHttpResponseNotAllowed(http.JsonResponse):
status_code = 405
def __init__(self, permitted_methods, data, *args, **kwargs):
super().__init__(data, *args, **kwargs)
self["Allow"] = ", ".join(permitted_methods)
def api_require_http_methods(request_method_list):
"""
This is copied verbatim from django.views.decorators.require_http_methods
*except* it changes which HTTP response class to return.
All of this just to make it possible to always return a JSON response
when the request method is not allowed.
Also, it's changed to use the f'' string format.
"""
def decorator(func):
@wraps(func)
def inner(request, *args, **kwargs):
if request.method not in request_method_list:
message = f"Method Not Allowed ({request.method}): {request.path}"
logger.warning(message, extra={"status_code": 405, "request": request})
return JsonHttpResponseNotAllowed(
request_method_list, {"error": message}
)
return func(request, *args, **kwargs)
return inner
return decorator
api_require_GET = api_require_http_methods(["GET"])
api_require_GET.__doc__ = (
"Decorator to require that a view only accepts the GET method."
)
api_require_POST = api_require_http_methods(["POST"])
api_require_POST.__doc__ = (
"Decorator to require that a view only accepts the POST method."
)
api_require_safe = api_require_http_methods(["GET", "HEAD"])
api_require_safe.__doc__ = (
"Decorator to require that a view only accepts safe methods: GET and HEAD."
)
# Same default as https://github.com/ottoyiu/django-cors-headers#cors_allow_headers
DEFAULT_ALLOW_HEADERS = (
"accept",
"accept-encoding",
"authorization",
"content-type",
"dnt",
"origin",
"user-agent",
"x-csrftoken",
"x-requested-with",
)
def set_cors_headers(origin="*", methods="GET", allow_headers=DEFAULT_ALLOW_HEADERS):
"""Decorator function that sets CORS headers on the response."""
if isinstance(methods, str):
methods = [methods]
def decorator(func):
@wraps(func)
def inner(*args, **kwargs):
response = func(*args, **kwargs)
response["Access-Control-Allow-Origin"] = origin
response["Access-Control-Allow-Methods"] = ",".join(methods)
response["Access-Control-Allow-Headers"] = ",".join(allow_headers)
return response
return inner
return decorator
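# Usage sketch for set_cors_headers (the view below is hypothetical):
#
#     @set_cors_headers(origin="*", methods=["GET", "HEAD"])
#     def ping(request):
#         return http.JsonResponse({"ok": True})
#
# Note that the decorator only adds response headers; it does not short-circuit
# CORS preflight (OPTIONS) requests.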
def make_tempdir(prefix=None, suffix=None):
"""Decorator that adds a last argument that is the path to a temporary
directory that gets deleted after the function has finished.
Usage::
@make_tempdir()
def some_function(arg1, arg2, tempdir, kwargs1='one'):
assert os.path.isdir(tempdir)
...
"""
def decorator(func):
@wraps(func)
def inner(*args, **kwargs):
with TemporaryDirectory(prefix=prefix, suffix=suffix) as f:
args = args + (f,)
return func(*args, **kwargs)
return inner
return decorator
|
mpl-2.0
|
b17c125475da077521a1c12e06e60001
| 30.764706
| 87
| 0.64892
| 4.124761
| false
| false
| false
| false
|
mozilla-services/tecken
|
tecken/api/views.py
|
1
|
35275
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import datetime
import logging
import markus
from django import http
from django.conf import settings
from django.urls import reverse
from django.contrib.auth.models import Permission, User
from django.db.models import Aggregate, Count, Q, Sum, Avg, Min
from django.utils import timezone
from django.shortcuts import get_object_or_404
from django.core.exceptions import PermissionDenied, BadRequest
from django.core.cache import cache
from tecken.api import forms
from tecken.base.decorators import (
api_login_required,
api_permission_required,
api_require_http_methods,
api_superuser_required,
)
from tecken.base.form_utils import filter_form_dates, ORM_OPERATORS, PaginationForm
from tecken.base.utils import filesizeformat
from tecken.download.models import MissingSymbol
from tecken.storage import StorageBucket
from tecken.tokens.models import Token
from tecken.upload.models import Upload, FileUpload, UploadsCreated
from tecken.upload.views import get_possible_bucket_urls
logger = logging.getLogger("tecken")
metrics = markus.get_metrics("tecken")
# Arbitrary big number for paging so we don't do a count on the full table; this
# needs to match the big number used in the frontend
BIG_NUMBER = 1_000_000
class SumCardinality(Aggregate):
template = "SUM(CARDINALITY(%(expressions)s))"
@metrics.timer_decorator("api", tags=["endpoint:auth"])
def auth(request):
context = {}
if request.user.is_authenticated:
context["user"] = {
"email": request.user.email,
"is_active": request.user.is_active,
"is_superuser": request.user.is_superuser,
"permissions": [],
}
permissions = Permission.objects.filter(
codename__in=(
"view_all_uploads",
"upload_symbols",
"upload_try_symbols",
"manage_tokens",
)
)
user_permissions = request.user.get_all_permissions()
for permission in permissions.select_related("content_type"):
codename = f"{permission.content_type.app_label}.{permission.codename}"
if codename in user_permissions:
context["user"]["permissions"].append(
{"id": permission.id, "codename": codename, "name": permission.name}
)
# do we need to add the one for managing tokens?
context["sign_out_url"] = request.build_absolute_uri(reverse("oidc_logout"))
else:
if settings.DEBUG: # pragma: no cover
if (
settings.OIDC_RP_CLIENT_ID == "mustbesomething"
or settings.OIDC_RP_CLIENT_SECRET == "mustbesomething"
):
# When you start up Tecken for the very first time and
# you haven't configured OIDC credentials, let's make a stink
# about this.
print(
"WARNING!\nSeems you haven't configured the necessary "
"OIDC environment variables OIDC_RP_CLIENT_ID and "
"OIDC_RP_CLIENT_SECRET.\n"
"Check your .env file and make sure you have something "
"set for DJANGO_OIDC_RP_CLIENT_ID and "
"DJANGO_OIDC_RP_CLIENT_SECRET.\n"
"Signing in won't work until this is set.\n"
)
context["sign_in_url"] = request.build_absolute_uri(
reverse("oidc_authentication_init")
)
context["user"] = None
return http.JsonResponse(context)
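# Example response shape for an authenticated user (values are illustrative):
#
#     {"user": {"email": "jane@example.com", "is_active": true,
#               "is_superuser": false, "permissions": [...]},
#      "sign_out_url": "https://symbols.example.com/accounts/logout/"}
#
# Anonymous requests instead get {"user": null, "sign_in_url": ...}.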
@api_login_required
@metrics.timer_decorator("api", tags=["endpoint:possible_upload_urls"])
def possible_upload_urls(request):
context = {"urls": []}
seen = set()
for url, private_or_public in get_possible_bucket_urls(request.user):
bucket_info = StorageBucket(url)
if bucket_info.name in seen:
continue
seen.add(bucket_info.name)
context["urls"].append(
{
"url": url,
"bucket_name": bucket_info.name,
"private": private_or_public == "private",
"default": url == settings.UPLOAD_DEFAULT_URL,
}
)
context["urls"].reverse() # Default first
return http.JsonResponse(context)
@api_login_required
@api_permission_required("tokens.manage_tokens")
def tokens(request):
def serialize_permissions(permissions):
return [{"name": x.name, "id": x.id} for x in permissions]
all_permissions = [
Permission.objects.get(codename="upload_symbols"),
Permission.objects.get(codename="upload_try_symbols"),
Permission.objects.get(codename="view_all_uploads"),
Permission.objects.get(codename="manage_tokens"),
]
all_user_permissions = request.user.get_all_permissions()
possible_permissions = [
x
for x in all_permissions
if (
f"{x.content_type.model}.{x.codename}" in all_user_permissions
or request.user.is_superuser
)
]
if request.method == "POST":
form = forms.TokenForm(request.POST)
if form.is_valid():
            # Check that every requested permission is one the user may grant
for permission in form.cleaned_data["permissions"]:
if permission not in possible_permissions:
raise PermissionDenied(f"{permission.name} not a valid permission")
expires_at = timezone.now() + datetime.timedelta(
days=form.cleaned_data["expires"]
)
token = Token.objects.create(
user=request.user,
expires_at=expires_at,
notes=form.cleaned_data["notes"].strip(),
)
for permission in form.cleaned_data["permissions"]:
token.permissions.add(permission)
return http.JsonResponse({"ok": True}, status=201)
else:
return http.JsonResponse({"errors": form.errors}, status=400)
form = forms.TokensForm(request.GET)
if not form.is_valid():
return http.JsonResponse({"errors": form.errors}, status=400)
filter_state = form.cleaned_data["state"]
context = {"tokens": [], "permissions": serialize_permissions(possible_permissions)}
qs = Token.objects.filter(user=request.user)
# Before we filter the queryset further, use it to calculate counts.
context["totals"] = {
"all": qs.count(),
"active": qs.filter(expires_at__gt=timezone.now()).count(),
"expired": qs.filter(expires_at__lte=timezone.now()).count(),
}
if filter_state == "all":
pass
elif filter_state == "expired":
qs = qs.filter(expires_at__lte=timezone.now())
else:
# The default is to only return active ones
qs = qs.filter(expires_at__gt=timezone.now())
for token in qs.order_by("-created_at"):
context["tokens"].append(
{
"id": token.id,
"expires_at": token.expires_at,
"is_expired": token.is_expired,
"key": token.key,
"permissions": serialize_permissions(token.permissions.all()),
"notes": token.notes,
"created_at": token.created_at,
}
)
return http.JsonResponse(context)
@api_require_http_methods(["DELETE"])
@api_login_required
def delete_token(request, id):
if request.user.is_superuser:
token = get_object_or_404(Token, id=id)
else:
token = get_object_or_404(Token, id=id, user=request.user)
token.delete()
return http.JsonResponse({"ok": True})
@api_require_http_methods(["POST"])
@api_login_required
def extend_token(request, id):
token = get_object_or_404(Token, id=id, user=request.user)
form = forms.ExtendTokenForm(request.POST)
if not form.is_valid():
return http.JsonResponse({"errors": form.errors}, status=400)
days = form.cleaned_data["days"] or 365
token.expires_at = token.expires_at + datetime.timedelta(days=days)
token.save()
return http.JsonResponse({"ok": True, "days": days})
def _uploads_content(form, pagination_form, qs, can_view_all):
content = {"can_view_all": can_view_all}
batch_size = settings.API_UPLOADS_BATCH_SIZE
page = pagination_form.cleaned_data["page"]
start = (page - 1) * batch_size
end = start + batch_size + 1
has_next = False
if form.cleaned_data.get("order_by"):
order_by = form.cleaned_data["order_by"]
else:
order_by = {"sort": "created_at", "reverse": True}
rows = []
order_by_string = ("-" if order_by["reverse"] else "") + order_by["sort"]
uploads = qs.select_related("user").order_by(order_by_string)[start:end]
for i, upload in enumerate(uploads):
if i == batch_size:
has_next = True
break
rows.append(
{
"id": upload.id,
"user": {"email": upload.user.email},
"filename": upload.filename,
"size": upload.size,
"bucket_name": upload.bucket_name,
"bucket_region": upload.bucket_region,
"bucket_endpoint_url": upload.bucket_endpoint_url,
"skipped_keys": upload.skipped_keys or [],
"ignored_keys": upload.ignored_keys or [],
"try_symbols": upload.try_symbols,
"download_url": upload.download_url,
"redirect_urls": upload.redirect_urls or [],
"completed_at": upload.completed_at,
"created_at": upload.created_at,
}
)
    # Make a FileUpload aggregate count on these uploads
    file_uploads = FileUpload.objects.filter(upload_id__in=[x["id"] for x in rows])
    # Convert it to a dict of completed file-upload counts keyed by upload id
    file_upload_counts_map = {
        x["upload"]: x["count"]
        for x in file_uploads.values("upload")
        .filter(completed_at__isnull=False)
        .annotate(count=Count("upload"))
    }
# And a dict of all the incomplete ones
file_upload_incomplete_counts_map = {
x["upload"]: x["count"]
for x in file_uploads.filter(completed_at__isnull=True)
.values("upload")
.annotate(count=Count("upload"))
}
for upload in rows:
upload["files_count"] = file_upload_counts_map.get(upload["id"], 0)
upload["files_incomplete_count"] = file_upload_incomplete_counts_map.get(
upload["id"], 0
)
content["uploads"] = rows
content["batch_size"] = batch_size
content["order_by"] = order_by
content["has_next"] = has_next
# NOTE(willkg): This is the only way I could figure out to determine whether a
# queryset had filters applied to it. We check that and if there are filters, we do
# the count and if there are not filters, then we use BIG_NUMBER so we don't have to
    # do a row count of the entire table in Postgres.
if qs._has_filters().__dict__["children"]:
content["total"] = qs.count()
else:
content["total"] = BIG_NUMBER
return content
@metrics.timer_decorator("api", tags=["endpoint:uploads"])
@api_login_required
def uploads(request):
form = forms.UploadsForm(request.GET, valid_sorts=("size", "created_at"))
if not form.is_valid():
return http.JsonResponse({"errors": form.errors}, status=400)
pagination_form = PaginationForm(request.GET)
if not pagination_form.is_valid():
return http.JsonResponse({"errors": pagination_form.errors}, status=400)
can_view_all = request.user.has_perm("upload.view_all_uploads")
qs = Upload.objects.all()
qs = filter_uploads(qs, can_view_all, request.user, form)
context = _uploads_content(form, pagination_form, qs, can_view_all)
return http.JsonResponse(context)
def filter_uploads(qs, can_view_all, user, form):
# Force the filtering to *your* symbols unless you have the
# 'view_all_uploads' permission.
if can_view_all:
if form.cleaned_data["user"]:
operator, user = form.cleaned_data["user"]
qs_function = qs.exclude if operator == "!" else qs.filter
# If the form managed to convert it to an instance,
# the select queried doesn't need to do a join.
# Otherwise do a regex on its email.
if isinstance(user, str):
qs = qs_function(user__email__icontains=user)
else:
qs = qs_function(user=user)
else:
qs = qs.filter(user=user)
for operator, value in form.cleaned_data["size"]:
orm_operator = "size__{}".format(ORM_OPERATORS[operator])
qs = qs.filter(**{orm_operator: value})
qs = filter_form_dates(qs, form, ("created_at", "completed_at"))
return qs
@metrics.timer_decorator("api", tags=["endpoint:upload"])
@api_login_required
def upload(request, id):
obj = get_object_or_404(Upload, id=id)
# You're only allowed to see this if it's yours or you have the
# 'view_all_uploads' permission.
if not (
obj.user == request.user or request.user.has_perm("upload.view_all_uploads")
):
raise PermissionDenied("Insufficient access to view this upload")
def make_upload_dict(upload_obj):
file_uploads_qs = FileUpload.objects.filter(upload=upload_obj)
file_uploads = []
for file_upload in file_uploads_qs.order_by("created_at"):
file_uploads.append(
{
"id": file_upload.id,
"bucket_name": file_upload.bucket_name,
"key": file_upload.key,
"update": file_upload.update,
"compressed": file_upload.compressed,
"size": file_upload.size,
"completed_at": file_upload.completed_at,
"created_at": file_upload.created_at,
}
)
return {
"id": upload_obj.id,
"filename": upload_obj.filename,
"user": {"id": upload_obj.user.id, "email": upload_obj.user.email},
"size": upload_obj.size,
"bucket_name": upload_obj.bucket_name,
"bucket_region": upload_obj.bucket_region,
"bucket_endpoint_url": upload_obj.bucket_endpoint_url,
"skipped_keys": upload_obj.skipped_keys or [],
"ignored_keys": upload_obj.ignored_keys or [],
"try_symbols": upload_obj.try_symbols,
"download_url": upload_obj.download_url,
"redirect_urls": upload_obj.redirect_urls or [],
"completed_at": upload_obj.completed_at,
"created_at": upload_obj.created_at,
"file_uploads": file_uploads,
}
upload_dict = make_upload_dict(obj)
related_qs = Upload.objects.exclude(id=obj.id).filter(size=obj.size, user=obj.user)
if obj.content_hash:
related_qs = related_qs.filter(content_hash=obj.content_hash)
else:
# The `content_hash` attribute is a new field as of Oct 10 2017.
# So if the upload doesn't have that, use the filename which is
# less than ideal.
related_qs = related_qs.filter(filename=obj.filename)
upload_dict["related"] = []
for related_upload in related_qs.order_by("-created_at"):
upload_dict["related"].append(make_upload_dict(related_upload))
context = {"upload": upload_dict}
return http.JsonResponse(context)
@api_login_required
@api_permission_required("upload.view_all_uploads")
def uploads_created(request):
context = {"uploads_created": []}
form = forms.UploadsCreatedForm(request.GET, valid_sorts=("size", "date"))
if not form.is_valid():
return http.JsonResponse({"errors": form.errors}, status=400)
pagination_form = PaginationForm(request.GET)
if not pagination_form.is_valid():
return http.JsonResponse({"errors": pagination_form.errors}, status=400)
qs = UploadsCreated.objects.all()
qs = filter_uploads_created(qs, form)
batch_size = settings.API_UPLOADS_CREATED_BATCH_SIZE
page = pagination_form.cleaned_data["page"]
start = (page - 1) * batch_size
end = start + batch_size
aggregates_numbers = qs.aggregate(
count=Sum("count"),
total=Count("id"),
size_avg=Avg("size"),
size=Sum("size"),
files=Sum("files"),
skipped=Sum("skipped"),
ignored=Sum("ignored"),
)
context["aggregates"] = {
"uploads_created": {
"count": aggregates_numbers["count"],
"files": aggregates_numbers["files"],
"size": aggregates_numbers["size"],
"size_avg": aggregates_numbers["size_avg"],
"skipped": aggregates_numbers["skipped"],
"ignored": aggregates_numbers["ignored"],
}
}
if form.cleaned_data.get("order_by"):
order_by = form.cleaned_data["order_by"]
else:
order_by = {"sort": "date", "reverse": True}
rows = []
order_by_string = ("-" if order_by["reverse"] else "") + order_by["sort"]
for created in qs.order_by(order_by_string)[start:end]:
rows.append(
{
"id": created.id,
"date": created.date,
"count": created.count,
"files": created.files,
"skipped": created.skipped,
"ignored": created.ignored,
"size": created.size,
"size_avg": created.size_avg,
"created_at": created.created_at,
# "modified_at": created.modified_at,
}
)
context["uploads_created"] = rows
context["total"] = aggregates_numbers["total"]
context["batch_size"] = batch_size
context["order_by"] = order_by
return http.JsonResponse(context)
def filter_uploads_created(qs, form):
for key in ("size", "count"):
for operator, value in form.cleaned_data[key]:
orm_operator = "{}__{}".format(key, ORM_OPERATORS[operator])
qs = qs.filter(**{orm_operator: value})
qs = filter_form_dates(qs, form, ("date",))
return qs
@api_login_required
@api_superuser_required
def uploads_created_backfilled(request):
"""Temporary function that serves two purposes. Ability to see if all the
UploadsCreated have been backfilled and actually do some backfill."""
context = {}
min_uploads = Upload.objects.aggregate(min=Min("created_at"))["min"]
days_till_today = (timezone.now() - min_uploads).days
uploads_created_count = UploadsCreated.objects.all().count()
context["uploads_created_count"] = uploads_created_count
context["days_till_today"] = days_till_today
context["backfilled"] = bool(
uploads_created_count and days_till_today + 1 == uploads_created_count
)
if request.method == "POST":
days = int(request.POST.get("days", 2))
force = request.POST.get("force", "no") not in ("no", "0", "false")
start = min_uploads.date()
today = timezone.now().date()
context["updated"] = []
while start <= today:
if force or not UploadsCreated.objects.filter(date=start).exists():
record = UploadsCreated.update(start)
context["updated"].append({"date": record.date, "count": record.count})
if len(context["updated"]) >= days:
break
start += datetime.timedelta(days=1)
context["backfilled"] = True
return http.JsonResponse(context)
def _upload_files_build_qs(request):
form = forms.FileUploadsForm(request.GET)
if not form.is_valid():
return http.JsonResponse({"errors": form.errors}, status=400)
qs = FileUpload.objects.all()
for operator, value in form.cleaned_data["size"]:
orm_operator = "size__{}".format(ORM_OPERATORS[operator])
qs = qs.filter(**{orm_operator: value})
qs = filter_form_dates(qs, form, ("created_at", "completed_at"))
if form.cleaned_data.get("key"):
key_q = Q(key__icontains=form.cleaned_data["key"][0])
for other in form.cleaned_data["key"][1:]:
key_q &= Q(key__icontains=other)
qs = qs.filter(key_q)
include_bucket_names = []
for operator, bucket_name in form.cleaned_data["bucket_name"]:
if operator == "!":
qs = qs.exclude(bucket_name=bucket_name)
else:
include_bucket_names.append(bucket_name)
if include_bucket_names:
qs = qs.filter(bucket_name__in=include_bucket_names)
return qs
def _upload_files_content(request, qs):
pagination_form = PaginationForm(request.GET)
if not pagination_form.is_valid():
raise BadRequest("formerrors", pagination_form.errors)
page = pagination_form.cleaned_data["page"]
files = []
batch_size = settings.API_FILES_BATCH_SIZE
start = (page - 1) * batch_size
end = start + batch_size + 1
has_next = False
upload_ids = set()
file_uploads = qs.order_by("-created_at")[start:end]
for i, file_upload in enumerate(file_uploads):
if i == batch_size:
has_next = True
break
files.append(
{
"id": file_upload.id,
"key": file_upload.key,
"update": file_upload.update,
"compressed": file_upload.compressed,
"size": file_upload.size,
"bucket_name": file_upload.bucket_name,
"completed_at": file_upload.completed_at,
"created_at": file_upload.created_at,
"upload": file_upload.upload_id,
}
)
if file_upload.upload_id:
upload_ids.add(file_upload.upload_id)
uploads = {
x.id: x for x in Upload.objects.filter(id__in=upload_ids).select_related("user")
}
uploads_cache = {}
def hydrate_upload(upload_id):
if upload_id:
if upload_id not in uploads_cache:
upload = uploads[upload_id]
uploads_cache[upload_id] = {
"id": upload.id,
"try_symbols": upload.try_symbols,
"user": {"id": upload.user.id, "email": upload.user.email},
"created_at": upload.created_at,
}
return uploads_cache[upload_id]
for file_upload in files:
file_upload["upload"] = hydrate_upload(file_upload["upload"])
content = {
"files": files,
"has_next": has_next,
"batch_size": batch_size,
}
# NOTE(willkg): This is the only way I could figure out to determine whether a
# queryset had filters applied to it. We check that and if there are filters, we do
# the count and if there are not filters, then we use BIG_NUMBER so we don't have to
    # do a row count of the entire table in Postgres.
if qs._has_filters().__dict__["children"]:
content["total"] = qs.count()
else:
content["total"] = BIG_NUMBER
return content
@metrics.timer_decorator("api", tags=["endpoint:upload_files"])
@api_login_required
@api_permission_required("upload.view_all_uploads")
def upload_files(request):
qs = _upload_files_build_qs(request)
try:
context = _upload_files_content(request, qs)
except BadRequest as e:
return http.JsonResponse({"errors": e.args[1]}, status=400)
return http.JsonResponse(context)
@metrics.timer_decorator("api", tags=["endpoint:upload_file"])
@api_login_required
def upload_file(request, id):
file_upload = get_object_or_404(FileUpload, id=id)
# You're only allowed to see this if it's yours or you have the
# 'view_all_uploads' permission.
if not (
(file_upload.upload and file_upload.upload.user == request.user)
or request.user.has_perm("upload.view_all_uploads")
):
raise PermissionDenied("Insufficient access to view this file")
symbol, debugid, filename = file_upload.key.split("/")[-3:]
url = reverse("download:download_symbol", args=(symbol, debugid, filename))
if file_upload.upload and file_upload.upload.try_symbols:
url += "?try"
file_dict = {
"id": file_upload.id,
"bucket_name": file_upload.bucket_name,
"key": file_upload.key,
"update": file_upload.update,
"compressed": file_upload.compressed,
"size": file_upload.size,
"url": url,
"completed_at": file_upload.completed_at,
"created_at": file_upload.created_at,
"upload": None,
}
if file_upload.upload:
upload_obj = file_upload.upload
file_dict["upload"] = {
"id": upload_obj.id,
"filename": upload_obj.filename,
"user": {"id": upload_obj.user.id, "email": upload_obj.user.email},
"size": upload_obj.size,
"bucket_name": upload_obj.bucket_name,
"bucket_region": upload_obj.bucket_region,
"bucket_endpoint_url": upload_obj.bucket_endpoint_url,
"skipped_keys": upload_obj.skipped_keys or [],
"ignored_keys": upload_obj.ignored_keys or [],
"download_url": upload_obj.download_url,
"redirect_urls": upload_obj.redirect_urls or [],
"created_at": upload_obj.created_at,
"completed_at": upload_obj.completed_at,
}
context = {"file": file_dict}
return http.JsonResponse(context)
@metrics.timer_decorator("api", tags=["endpoint:stats"])
@api_login_required
def stats(request):
numbers = {}
all_uploads = request.user.has_perm("upload.can_view_all")
today = timezone.now()
start_today = today.replace(hour=0, minute=0, second=0)
start_yesterday = start_today - datetime.timedelta(days=1)
last_30_days = today - datetime.timedelta(days=30)
if not all_uploads:
with metrics.timer("api_stats", tags=["section:your_uploads"]):
# If it's an individual user, they can only see their own uploads and
# thus can't use UploadsCreated.
upload_qs = Upload.objects.filter(user=request.user)
files_qs = FileUpload.objects.filter(upload__user=request.user)
def count_and_size(qs, start, end):
sub_qs = qs.filter(created_at__gte=start, created_at__lt=end)
return sub_qs.aggregate(count=Count("id"), total_size=Sum("size"))
def count(qs, start, end):
sub_qs = qs.filter(created_at__gte=start, created_at__lt=end)
return sub_qs.aggregate(count=Count("id"))
numbers["uploads"] = {
"all_uploads": all_uploads,
"today": count_and_size(upload_qs, start_today, today),
"yesterday": count_and_size(upload_qs, start_yesterday, start_today),
"last_30_days": count_and_size(upload_qs, last_30_days, today),
}
numbers["files"] = {
"today": count(files_qs, start_today, today),
"yesterday": count(files_qs, start_yesterday, start_today),
"last_30_days": count(files_qs, last_30_days, today),
}
else:
with metrics.timer("api_stats", tags=["section:all_uploads"]):
def count_and_size(start, end):
return UploadsCreated.objects.filter(
date__gte=start.date(), date__lt=end.date()
).aggregate(
count=Sum("count"), total_size=Sum("size"), files=Sum("files")
)
_today = count_and_size(today, today + datetime.timedelta(days=1))
_yesterday = count_and_size(today - datetime.timedelta(days=1), today)
count_last_30_days = count_and_size(
last_30_days, today + datetime.timedelta(days=1)
)
numbers["uploads"] = {
"all_uploads": all_uploads,
"today": {"count": _today["count"], "total_size": _today["total_size"]},
"yesterday": {
"count": _yesterday["count"],
"total_size": _yesterday["total_size"],
},
"last_30_days": {
"count": count_last_30_days["count"],
"total_size": count_last_30_days["total_size"],
},
}
numbers["files"] = {
"today": {"count": _today["files"]},
"yesterday": {"count": _yesterday["files"]},
"last_30_days": {"count": count_last_30_days["files"]},
}
with metrics.timer("api_stats", tags=["section:all_missing_downloads"]):
# When doing aggregates on rows that don't exist you can get a None instead
# of 0. Only really happens in cases where you have extremely little in the
# database.
def nones_to_zero(obj):
for key, value in obj.items():
if isinstance(value, dict):
nones_to_zero(value)
elif value is None:
obj[key] = 0
nones_to_zero(numbers)
missing_qs = MissingSymbol.objects.all()
def count_missing(start, end, use_cache=True):
count = None
if use_cache:
fmt = "%Y%m%d"
cache_key = f"count_missing:{start.strftime(fmt)}:{end.strftime(fmt)}"
count = cache.get(cache_key)
if count is None:
qs = missing_qs.filter(modified_at__gte=start, modified_at__lt=end)
count = qs.count()
if use_cache:
cache.set(cache_key, count, 60 * 60 * 24)
return {"count": count}
numbers["downloads"] = {
"missing": {
"today": count_missing(start_today, today, use_cache=False),
"yesterday": count_missing(start_yesterday, start_today),
"last_30_days": count_missing(last_30_days, start_today),
},
}
# A clever trick! Instead of counting the last_30_days to include now,
# we count the last 29 days instead up until the start of today.
# Then, to make it the last 30 days we *add* the "today" count.
numbers["downloads"]["missing"]["last_30_days"]["count"] += numbers[
"downloads"
]["missing"]["today"]["count"]
with metrics.timer("api_stats", tags=["section:your_tokens"]):
# Gather some numbers about tokens
tokens_qs = Token.objects.filter(user=request.user)
numbers["tokens"] = {
"total": tokens_qs.count(),
"expired": tokens_qs.filter(expires_at__lt=today).count(),
}
# Gather some numbers about users
if request.user.is_superuser:
with metrics.timer("api_stats", tags=["section:all_users"]):
users_qs = User.objects.all()
numbers["users"] = {
"total": users_qs.count(),
"superusers": users_qs.filter(is_superuser=True).count(),
"active": users_qs.filter(is_active=True).count(),
"not_active": users_qs.filter(is_active=False).count(),
}
context = {"stats": numbers}
return http.JsonResponse(context)
@api_login_required
def stats_uploads(request):
context = {}
today = timezone.now().date()
yesterday = today - datetime.timedelta(days=1)
start_month = today
while start_month.day != 1:
start_month -= datetime.timedelta(days=1)
def count_uploads(date, end=None):
qs = UploadsCreated.objects.filter(date__gte=date)
if end is not None:
qs = qs.filter(date__lt=end)
aggregates = qs.aggregate(
count=Sum("count"), total_size=Sum("size"), files=Sum("files")
)
return {
"count": aggregates["count"] or 0,
"total_size": aggregates["total_size"] or 0,
"total_size_human": filesizeformat(aggregates["total_size"] or 0),
"files": aggregates["files"] or 0,
}
context["uploads"] = {
"today": count_uploads(today),
"yesterday": count_uploads(yesterday, end=today),
"this_month": count_uploads(start_month),
}
return http.JsonResponse(context)
@metrics.timer_decorator("api", tags=["endpoint:downloads_missing"])
def downloads_missing(request):
context = {}
form = forms.DownloadsMissingForm(
request.GET, valid_sorts=("modified_at", "count", "created_at")
)
if not form.is_valid():
return http.JsonResponse({"errors": form.errors}, status=400)
pagination_form = PaginationForm(request.GET)
if not pagination_form.is_valid():
return http.JsonResponse({"errors": pagination_form.errors}, status=400)
qs = MissingSymbol.objects.all()
qs = filter_missing_symbols(qs, form)
batch_size = settings.API_DOWNLOADS_MISSING_BATCH_SIZE
context["batch_size"] = batch_size
page = pagination_form.cleaned_data["page"]
start = (page - 1) * batch_size
end = start + batch_size
total_count = qs.count()
context["aggregates"] = {"missing": {"total": total_count}}
today = timezone.now()
for days in (1, 30):
count = qs.filter(
modified_at__gte=today - datetime.timedelta(days=days)
).count()
context["aggregates"]["missing"][f"last_{days}_days"] = count
context["total"] = context["aggregates"]["missing"]["total"]
if form.cleaned_data.get("order_by"):
order_by = form.cleaned_data["order_by"]
else:
order_by = {"sort": "modified_at", "reverse": True}
rows = []
order_by_string = ("-" if order_by["reverse"] else "") + order_by["sort"]
for missing in qs.order_by(order_by_string)[start:end]:
rows.append(
{
"id": missing.id,
"symbol": missing.symbol,
"debugid": missing.debugid,
"filename": missing.filename,
"code_file": missing.code_file,
"code_id": missing.code_id,
"count": missing.count,
"modified_at": missing.modified_at,
"created_at": missing.created_at,
}
)
context["missing"] = rows
context["order_by"] = order_by
return http.JsonResponse(context)
def filter_missing_symbols(qs, form):
qs = filter_form_dates(qs, form, ("created_at", "modified_at"))
for operator, value in form.cleaned_data["count"]:
orm_operator = "count__{}".format(ORM_OPERATORS[operator])
qs = qs.filter(**{orm_operator: value})
for key in ("symbol", "debugid", "filename"):
if form.cleaned_data[key]:
qs = qs.filter(**{f"{key}__contains": form.cleaned_data[key]})
return qs
|
mpl-2.0
|
5880e132a2b283d8848a261caaf908a4
| 36.407211
| 88
| 0.585996
| 3.861522
| false
| false
| false
| false
|
mozilla-services/tecken
|
eliot-service/tests/test_downloader.py
|
1
|
2624
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import pytest
from eliot.downloader import (
ErrorFileNotFound,
FileNotFound,
HTTPSource,
SymbolFileDownloader,
)
FAKE_HOST = "http://example.com"
FAKE_HOST2 = "http://2.example.com"
class TestHTTPSource:
def test_get(self, requestsmock):
data = b"abcde"
requestsmock.get(
FAKE_HOST + "/xul.so/ABCDE/xul.sym", status_code=200, content=data
)
source = HTTPSource(FAKE_HOST)
ret = source.get("xul.so", "ABCDE", "xul.sym")
assert ret == data
assert type(ret) == type(data)
def test_get_404(self, requestsmock):
requestsmock.get(FAKE_HOST + "/xul.so/ABCDE/xul.sym", status_code=404)
source = HTTPSource(FAKE_HOST)
with pytest.raises(FileNotFound):
source.get("xul.so", "ABCDE", "xul.sym")
def test_get_500(self, requestsmock):
requestsmock.get(FAKE_HOST + "/xul.so/ABCDE/xul.sym", status_code=500)
source = HTTPSource(FAKE_HOST)
with pytest.raises(ErrorFileNotFound):
source.get("xul.so", "ABCDE", "xul.sym")
class TestSymbolFileDownloader:
def test_get(self, requestsmock):
data_1 = b"abcde"
data_2 = b"12345"
requestsmock.get(
FAKE_HOST + "/xul.so/ABCDE/xul.sym", status_code=200, content=data_1
)
requestsmock.get(
FAKE_HOST2 + "/xul.so/ABCDE/xul.sym", status_code=200, content=data_2
)
downloader = SymbolFileDownloader(source_urls=[FAKE_HOST, FAKE_HOST2])
ret = downloader.get("xul.so", "ABCDE", "xul.sym")
assert ret == data_1
def test_get_from_second(self, requestsmock):
data = b"abcde"
requestsmock.get(FAKE_HOST + "/xul.so/ABCDE/xul.sym", status_code=404)
requestsmock.get(
FAKE_HOST2 + "/xul.so/ABCDE/xul.sym", status_code=200, content=data
)
downloader = SymbolFileDownloader(source_urls=[FAKE_HOST, FAKE_HOST2])
ret = downloader.get("xul.so", "ABCDE", "xul.sym")
assert ret == data
def test_404(self, requestsmock):
requestsmock.get(FAKE_HOST + "/xul.so/ABCDE/xul.sym", status_code=404)
requestsmock.get(FAKE_HOST2 + "/xul.so/ABCDE/xul.sym", status_code=404)
downloader = SymbolFileDownloader(source_urls=[FAKE_HOST, FAKE_HOST2])
with pytest.raises(FileNotFound):
downloader.get("xul.so", "ABCDE", "xul.sym")
|
mpl-2.0
|
019c56e483caa059f54a94fa9682bd81
| 32.641026
| 81
| 0.62157
| 3.188335
| false
| true
| false
| false
|
mozilla-services/tecken
|
tecken/storage.py
|
1
|
6501
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import re
from urllib.parse import urlparse
from botocore.exceptions import BotoCoreError, ClientError
import boto3
from botocore.config import Config
from django.conf import settings
ALL_POSSIBLE_S3_REGIONS = tuple(boto3.session.Session().get_available_regions("s3"))
class StorageError(Exception):
"""A backend-specific client reported an error."""
def __init__(self, bucket, backend_error):
self.backend = bucket.backend
self.url = bucket.url
self.backend_msg = f"{type(backend_error).__name__}: {backend_error}"
def __str__(self):
return f"{self.backend} backend ({self.url}) raised {self.backend_msg}"
class StorageBucket:
"""
Deconstructs a URL about an S3 bucket and breaks it into parts that
can be used for various purposes. Also, contains a convenient method
for getting a boto3 s3 client instance ultimately based on the URL.
Usage::
>>> s = StorageBucket(
... 'https://s3-us-west-2.amazonaws.com/bucky/prfx?access=public'
        ... )
>>> s.netloc
's3-us-west-2.amazonaws.com'
>>> s.name
'bucky'
>>> s.private # note, private is usually default
False
>>> s.prefix
'prfx'
>>> s.client.list_objects_v2(Bucket=s.name, Prefix='some/key.ext')
"""
# A substring match of the domain is used to recognize storage backends.
# For emulated backends, the name should be present in the docker-compose
# service name.
_URL_FINGERPRINT = {
# AWS S3, like bucket-name.s3.amazonaws.com
"s3": ".amazonaws.com",
# Localstack S3 Emulator
"emulated-s3": "localstack",
# S3 test domain
"test-s3": "s3.example.com",
}
def __init__(self, url, try_symbols=False, file_prefix=""):
self.url = url
parsed = urlparse(url)
self.scheme = parsed.scheme
self.netloc = parsed.netloc
# Determine the backend from the netloc (domain plus port)
self.backend = None
for backend, fingerprint in self._URL_FINGERPRINT.items():
if fingerprint in self.netloc:
self.backend = backend
break
if self.backend is None:
raise ValueError(f"Storage backend not recognized in {url!r}")
try:
name, prefix = parsed.path[1:].split("/", 1)
if prefix.endswith("/"):
prefix = prefix[:-1]
except ValueError:
prefix = ""
name = parsed.path[1:]
self.name = name
if file_prefix:
if prefix:
prefix += f"/{file_prefix}"
else:
prefix = file_prefix
self.prefix = prefix
self.private = "access=public" not in parsed.query
self.try_symbols = try_symbols
self.endpoint_url = None
self.region = None
if not self.backend == "s3":
# the endpoint_url will be all but the path
self.endpoint_url = f"{parsed.scheme}://{parsed.netloc}"
region = re.findall(r"s3-(.*)\.amazonaws\.com", parsed.netloc)
if region:
if region[0] not in ALL_POSSIBLE_S3_REGIONS:
raise ValueError(f"Not valid S3 region {region[0]}")
self.region = region[0]
@property
def base_url(self):
"""Return the URL by its domain and bucket name"""
return f"{self.scheme}://{self.netloc}/{self.name}"
def __repr__(self):
return (
f"<{self.__class__.__name__} name={self.name!r} "
f"endpoint_url={self.endpoint_url!r} region={self.region!r} "
f"backend={self.backend!r}>"
)
@property
def client(self):
"""Return a backend-specific client, cached on first access.
TODO(jwhitlock): Build up StorageBucket API so users don't work directly with
the backend-specific clients (bug 1564452).
"""
if not getattr(self, "_client", None):
self._client = get_storage_client(
endpoint_url=self.endpoint_url, region_name=self.region
)
return self._client
def get_storage_client(self, **config_params):
"""Return a backend-specific client, overriding default config parameters.
TODO(jwhitlock): Build up StorageBucket API so users don't work directly with
the backend-specific clients (bug 1564452).
"""
return get_storage_client(
endpoint_url=self.endpoint_url, region_name=self.region, **config_params
)
def exists(self):
"""Check that the bucket exists in the backend.
        :raises StorageError: An unexpected backend-specific error was raised.
:returns: True if the bucket exists, False if it does not
"""
# Use lower lookup timeouts on S3, to fail quickly when there are network issues
client = self.get_storage_client(
read_timeout=settings.S3_LOOKUP_READ_TIMEOUT,
connect_timeout=settings.S3_LOOKUP_CONNECT_TIMEOUT,
)
try:
client.head_bucket(Bucket=self.name)
except ClientError as error:
# A generic ClientError can be raised if:
# - The bucket doesn't exist (code 404)
# - The user doesn't have s3:ListBucket perm (code 403)
# - Other credential issues (code 403, maybe others)
if error.response["Error"]["Code"] == "404":
return False
else:
raise StorageError(self, error)
except BotoCoreError as error:
raise StorageError(self, error)
else:
return True
def get_storage_client(endpoint_url=None, region_name=None, **config_params):
options = {"config": Config(**config_params)}
if endpoint_url:
# By default, if you don't specify an endpoint_url
# boto3 will automatically assume AWS's S3.
# For local development we are running a local S3
# fake service with localstack. Then we need to
# specify the endpoint_url.
options["endpoint_url"] = endpoint_url
if region_name:
options["region_name"] = region_name
session = boto3.session.Session()
return session.client("s3", **options)
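# Usage sketch (the endpoint URL is a placeholder for a local S3 emulator):
#
#     client = get_storage_client(endpoint_url="http://localstack:4566",
#                                 connect_timeout=2, read_timeout=5)
#     client.list_objects_v2(Bucket="publicbucket", Prefix="v1/xul.so/")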
|
mpl-2.0
|
5dd56481e06775452d02a37a200d112f
| 34.52459
| 88
| 0.599754
| 4.070758
| false
| false
| false
| false
|
cdr-stats/cdr-stats
|
cdr_stats/cdr_alert/functions_blacklist.py
|
1
|
3832
|
#
# CDR-Stats License
# http://www.cdr-stats.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from django.conf import settings
from cdr.functions_def import remove_prefix, prefix_list_string, get_country_id_prefix
from cdr_alert.models import Blacklist, Whitelist
from cdr_alert.tasks import blacklist_whitelist_notification
def chk_prefix_in_whitelist(prefix_list):
"""Check destination no with allowed prefix
>>> chk_prefix_in_whitelist([34, 345, 3456])
False
>>> chk_prefix_in_whitelist('')
False
"""
if not prefix_list:
return False
white_prefix_list = Whitelist.objects.all()
flag = False
if white_prefix_list:
for j in eval(prefix_list):
for i in white_prefix_list:
# Allowed Prefix
if i.phonenumber_prefix == j:
flag = True
break
# if flag is true - allowed
if flag:
# notice_type = 4 whitelist
blacklist_whitelist_notification.delay(4)
return True
    # no whitelist defined
return False
def chk_prefix_in_blacklist(prefix_list):
"""Check destination no with ban prefix
>>> chk_prefix_in_blacklist([34, 345, 3456])
True
>>> chk_prefix_in_blacklist([])
True
"""
if not prefix_list:
return True
banned_prefix_list = Blacklist.objects.all()
flag = False
if banned_prefix_list:
for j in eval(prefix_list):
for i in banned_prefix_list:
# Banned Prefix
if i.phonenumber_prefix == j:
flag = True
break
# if flag is true - not allowed
if flag:
# notice_type = 3 blacklist
blacklist_whitelist_notification.delay(3)
return False
# no blacklist is defined
return True
def verify_auth_dest_number(destination_number):
"""
>>> verify_auth_dest_number('1234567890')
{
'authorized': 0,
'country_id': 0,
'prefix_id': 0,
}
"""
# remove prefix
sanitized_destination = remove_prefix(destination_number, settings.PREFIX_TO_IGNORE)
prefix_list = prefix_list_string(sanitized_destination)
authorized = 1 # default
    # check destination against whitelist
authorized = chk_prefix_in_whitelist(prefix_list)
if authorized:
authorized = 1
else:
# check against blacklist
authorized = chk_prefix_in_blacklist(prefix_list)
if not authorized:
# not allowed destination
authorized = 0
if (len(sanitized_destination) < settings.PN_MIN_DIGITS
or sanitized_destination[:1].isalpha()):
# It might be an extension
country_id = None
prefix_id = None
elif (prefix_list
and len(sanitized_destination) >= settings.PN_MIN_DIGITS
and len(sanitized_destination) <= settings.PN_MAX_DIGITS):
        # It might be a local call
        # Need to add comma for get_country_id_prefix to eval correctly
prefix_list = prefix_list_string(str(settings.LOCAL_DIALCODE) + sanitized_destination)
(country_id, prefix_id) = get_country_id_prefix(prefix_list)
else:
# International call
(country_id, prefix_id) = get_country_id_prefix(prefix_list)
destination_data = {
'authorized': authorized,
'country_id': country_id,
'prefix_id': prefix_id,
}
return destination_data
|
mpl-2.0
|
26ccc11d45b55f4062dcdc13093806de
| 28.705426
| 94
| 0.607516
| 3.966874
| false
| false
| false
| false
|
cdr-stats/cdr-stats
|
cdr_stats/mod_utils/management.py
|
3
|
2097
|
# -*- coding: utf-8 -*-
#
# CDR-Stats License
# http://www.cdr-stats.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from django.conf import settings
from django.db.models import signals
from django.utils.translation import ugettext_noop as _
from user_profile.constants import NOTICE_TYPE
# Info about management.py
# http://stackoverflow.com/questions/4455533/what-is-management-py-in-django
if "notification" in settings.INSTALLED_APPS:
from notification import models as notification
def create_notice_types(app, created_models, verbosity, **kwargs):
kwargs = {}
kwargs['default'] = NOTICE_TYPE.average_length_of_call
notification.create_notice_type("average_length_of_call",
_("ALOC (average length of call)"),
_("average length of call"), **kwargs)
kwargs['default'] = NOTICE_TYPE.answer_seize_ratio
notification.create_notice_type("answer_seize_ratio",
_("ASR (answer seize ratio)"),
_("answer seize ratio"), **kwargs)
kwargs['default'] = NOTICE_TYPE.blacklist_prefix
notification.create_notice_type("blacklist_prefix",
_("Blacklist Prefix"),
_("blacklist prefix"), **kwargs)
kwargs['default'] = NOTICE_TYPE.whitelist_prefix
notification.create_notice_type("whitelist_prefix",
_("Whitelist Prefix"),
_("whitelist prefix"), **kwargs)
signals.post_syncdb.connect(create_notice_types, sender=notification)
else:
print "Skipping creation of NoticeTypes as notification app not found"
|
mpl-2.0
|
b1669061ba03bb6626092e619cc80339
| 41.795918
| 78
| 0.606104
| 4.359667
| false
| false
| false
| false
|
bitmovin/bitmovin-python
|
bitmovin/services/encodings/generic_muxing_service.py
|
1
|
1943
|
from bitmovin.errors import MissingArgumentError, BitmovinApiError, InvalidStatusError
from bitmovin.resources import ResourceResponse, Status
from bitmovin.resources.models import EncodingStatus
from bitmovin.services.rest_service import RestService
class GenericMuxingService(RestService):
BASE_ENDPOINT_URL = 'encoding/encodings/{encoding_id}/muxings/{type}'
def __init__(self, http_client, type_url, resource_class):
if not type_url:
raise MissingArgumentError('type_url must be given')
if not resource_class:
raise MissingArgumentError('resource_class must be given')
self.type_url = type_url
self.resource_class = resource_class
super().__init__(http_client=http_client, relative_url=self.BASE_ENDPOINT_URL, class_=self.resource_class)
def _get_endpoint_url(self, encoding_id):
if not encoding_id:
raise MissingArgumentError('encoding_id must be given')
return self.BASE_ENDPOINT_URL.replace('{encoding_id}', encoding_id).replace('{type}', self.type_url)
def create(self, object_, encoding_id):
self.relative_url = self._get_endpoint_url(encoding_id=encoding_id)
return super().create(object_)
def delete(self, encoding_id, muxing_id):
self.relative_url = self._get_endpoint_url(encoding_id=encoding_id)
return super().delete(id_=muxing_id)
def retrieve(self, encoding_id, muxing_id):
self.relative_url = self._get_endpoint_url(encoding_id=encoding_id)
return super().retrieve(id_=muxing_id)
def list(self, encoding_id, offset=None, limit=None):
self.relative_url = self._get_endpoint_url(encoding_id=encoding_id)
return super().list(offset, limit)
def retrieve_custom_data(self, encoding_id, muxing_id):
self.relative_url = self._get_endpoint_url(encoding_id=encoding_id)
return super().retrieve_custom_data(id_=muxing_id)
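# Hypothetical subclass sketch (the 'fmp4' type URL and Fmp4Muxing class are
# illustrative assumptions, not taken from this file):
#
#     class FMP4MuxingService(GenericMuxingService):
#         def __init__(self, http_client):
#             super().__init__(http_client=http_client, type_url='fmp4',
#                              resource_class=Fmp4Muxing)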
|
unlicense
|
e78d2d78893763e12be5459477d49499
| 45.261905
| 114
| 0.696861
| 3.736538
| false
| false
| false
| false
|
bitmovin/bitmovin-python
|
bitmovin/resources/models/filters/enhanced_watermark_filter.py
|
1
|
2080
|
from bitmovin.resources.enums import WatermarkUnit
from bitmovin.errors import InvalidTypeError
from bitmovin.utils import Serializable
from .abstract_filter import AbstractFilter
class EnhancedWatermarkFilter(AbstractFilter, Serializable):
def __init__(self, image, id_=None, left=None, right=None, top=None, bottom=None, unit=None, name=None,
custom_data=None, description=None, opacity=None):
super().__init__(id_=id_, name=name, custom_data=custom_data, description=description)
self.image = image
self.left = left
self.right = right
self.top = top
self.bottom = bottom
self.opacity = opacity
self._unit = None
self.unit = unit
@property
def unit(self):
return self._unit
@unit.setter
def unit(self, new_unit):
if new_unit is None:
self._unit = None
return
if isinstance(new_unit, str):
self._unit = new_unit
elif isinstance(new_unit, WatermarkUnit):
self._unit = new_unit.value
else:
raise InvalidTypeError(
'Invalid type {} for unit: must be either str or WatermarkUnit!'.format(type(new_unit)))
def serialize(self):
serialized = super().serialize()
serialized['unit'] = self.unit
return serialized
@classmethod
def parse_from_json_object(cls, json_object):
id_ = json_object['id']
image = json_object['image']
left = json_object.get('left')
right = json_object.get('right')
top = json_object.get('top')
bottom = json_object.get('bottom')
name = json_object.get('name')
description = json_object.get('description')
unit = json_object.get('unit')
opacity = json_object.get('opacity')
watermark_filter = EnhancedWatermarkFilter(
image=image, left=left, right=right, top=top, bottom=bottom, id_=id_, name=name, description=description,
unit=unit, opacity=opacity)
return watermark_filter
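# Minimal construction sketch (values are illustrative; assumes the WatermarkUnit
# enum exposes a PIXELS member):
#
#     watermark = EnhancedWatermarkFilter(image='https://example.com/logo.png',
#                                         top=10, right=10, opacity=0.5,
#                                         unit=WatermarkUnit.PIXELS,
#                                         name='logo-watermark')
#
# The unit setter above accepts either a WatermarkUnit member or its string value.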
|
unlicense
|
12e08ce533692ec9a135d204d1ea16db
| 34.254237
| 117
| 0.615865
| 4.038835
| false
| false
| false
| false
|
bitmovin/bitmovin-python
|
tests/bitmovin/services/manifests/dash/adaptationset_tests.py
|
1
|
8723
|
import unittest
import uuid
from bitmovin import Bitmovin, DashManifest, ACLEntry, ACLPermission, EncodingOutput, Period, AudioAdaptationSet, \
VideoAdaptationSet, SubtitleAdaptationSet, AbstractAdaptationSet
from tests.bitmovin import BitmovinTestCase
class AdaptationSetTests(BitmovinTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
def setUp(self):
super().setUp()
self.bitmovin = Bitmovin(self.api_key)
self.assertIsNotNone(self.bitmovin)
self.assertTrue(isinstance(self.bitmovin, Bitmovin))
def tearDown(self):
super().tearDown()
def test_add_audio_adaptationset(self):
sample_manifest = self._get_sample_manifest()
manifest_resource_response = self.bitmovin.manifests.DASH.create(sample_manifest)
self.assertIsNotNone(manifest_resource_response)
self.assertIsNotNone(manifest_resource_response.resource)
self.assertIsNotNone(manifest_resource_response.resource.id)
self._compare_manifests(sample_manifest, manifest_resource_response.resource)
sample_period = self._get_sample_period_default()
period_resource_response = self.bitmovin.manifests.DASH.add_period(
object_=sample_period, manifest_id=manifest_resource_response.resource.id)
self.assertIsNotNone(period_resource_response)
self.assertIsNotNone(period_resource_response.resource)
self.assertIsNotNone(period_resource_response.resource.id)
self._compare_periods(sample_period, period_resource_response.resource)
sample_audio_adaptationset = self._get_sample_audio_adaptationset()
adaptationset_resource_response = self.bitmovin.manifests.DASH.add_audio_adaptation_set(
object_=sample_audio_adaptationset, manifest_id=manifest_resource_response.resource.id,
period_id=period_resource_response.resource.id
)
self.assertIsNotNone(adaptationset_resource_response)
self.assertIsNotNone(adaptationset_resource_response.resource)
self.assertIsNotNone(adaptationset_resource_response.resource.id)
self._compare_audio_adaptationsets(sample_audio_adaptationset, adaptationset_resource_response.resource)
def test_add_video_adaptationset(self):
sample_manifest = self._get_sample_manifest()
manifest_resource_response = self.bitmovin.manifests.DASH.create(sample_manifest)
self.assertIsNotNone(manifest_resource_response)
self.assertIsNotNone(manifest_resource_response.resource)
self.assertIsNotNone(manifest_resource_response.resource.id)
self._compare_manifests(sample_manifest, manifest_resource_response.resource)
sample_period = self._get_sample_period_default()
period_resource_response = self.bitmovin.manifests.DASH.add_period(
object_=sample_period, manifest_id=manifest_resource_response.resource.id)
self.assertIsNotNone(period_resource_response)
self.assertIsNotNone(period_resource_response.resource)
self.assertIsNotNone(period_resource_response.resource.id)
self._compare_periods(sample_period, period_resource_response.resource)
sample_video_adaptationset = self._get_sample_video_adaptationset()
adaptationset_resource_response = self.bitmovin.manifests.DASH.add_video_adaptation_set(
object_=sample_video_adaptationset, manifest_id=manifest_resource_response.resource.id,
period_id=period_resource_response.resource.id
)
self.assertIsNotNone(adaptationset_resource_response)
self.assertIsNotNone(adaptationset_resource_response.resource)
self.assertIsNotNone(adaptationset_resource_response.resource.id)
self._compare_video_adaptationsets(sample_video_adaptationset, adaptationset_resource_response.resource)
def test_add_subtitle_adaptationset(self):
sample_manifest = self._get_sample_manifest()
manifest_resource_response = self.bitmovin.manifests.DASH.create(sample_manifest)
self.assertIsNotNone(manifest_resource_response)
self.assertIsNotNone(manifest_resource_response.resource)
self.assertIsNotNone(manifest_resource_response.resource.id)
self._compare_manifests(sample_manifest, manifest_resource_response.resource)
sample_period = self._get_sample_period_default()
period_resource_response = self.bitmovin.manifests.DASH.add_period(
object_=sample_period, manifest_id=manifest_resource_response.resource.id)
self.assertIsNotNone(period_resource_response)
self.assertIsNotNone(period_resource_response.resource)
self.assertIsNotNone(period_resource_response.resource.id)
self._compare_periods(sample_period, period_resource_response.resource)
sample_subtitle_adaptationset = self._get_sample_subtitle_adaptationset()
adaptationset_resource_response = self.bitmovin.manifests.DASH.add_subtitle_adaptation_set(
object_=sample_subtitle_adaptationset, manifest_id=manifest_resource_response.resource.id,
period_id=period_resource_response.resource.id
)
self.assertIsNotNone(adaptationset_resource_response)
self.assertIsNotNone(adaptationset_resource_response.resource)
self.assertIsNotNone(adaptationset_resource_response.resource.id)
self._compare_subtitle_adaptationsets(sample_subtitle_adaptationset, adaptationset_resource_response.resource)
def _compare_manifests(self, first: DashManifest, second: DashManifest):
self.assertEqual(first.manifestName, second.manifestName)
self.assertEqual(first.description, second.description)
self.assertEqual(len(first.outputs), len(second.outputs))
self.assertEqual(first.name, second.name)
self.assertEqual(first.description, second.description)
return True
def _compare_periods(self, first: Period, second: Period):
self.assertEqual(first.start, second.start)
self.assertEqual(first.duration, second.duration)
return True
def _compare_adaptationsets(self, first: AbstractAdaptationSet, second: AbstractAdaptationSet):
return True
def _compare_audio_adaptationsets(self, first: AudioAdaptationSet, second: AudioAdaptationSet):
self._compare_adaptationsets(first, second)
self.assertEqual(first.lang, second.lang)
return True
def _compare_video_adaptationsets(self, first: VideoAdaptationSet, second: VideoAdaptationSet):
self._compare_adaptationsets(first, second)
return True
def _compare_subtitle_adaptationsets(self, first: SubtitleAdaptationSet, second: SubtitleAdaptationSet):
self._compare_adaptationsets(first, second)
self.assertEqual(first.lang, second.lang)
return True
def _get_sample_manifest(self):
encoding_output = self._get_sample_encoding_output()
manifest = DashManifest(manifest_name='bitmovin-python_Sample_DASH_Manifest.mpd', outputs=[encoding_output],
name='Sample DASH Manifest')
self.assertIsNotNone(manifest)
self.assertIsNotNone(manifest.manifestName)
self.assertIsNotNone(manifest.outputs)
return manifest
def _get_sample_encoding_output(self):
acl_entry = ACLEntry(scope='string', permission=ACLPermission.PUBLIC_READ)
sample_output = self.utils.get_sample_s3_output()
s3_output = self.bitmovin.outputs.S3.create(sample_output)
encoding_output = EncodingOutput(output_id=s3_output.resource.id,
output_path='/bitmovin-python/StreamTests/'+str(uuid.uuid4()),
acl=[acl_entry])
return encoding_output
def _get_sample_period_default(self):
period = Period()
return period
def _get_special_period(self):
period = self._get_sample_period_default()
period.start = 1.33
period.duration = 67.3
return period
def _get_sample_audio_adaptationset(self):
audio_adaptationset = AudioAdaptationSet(lang='en')
return audio_adaptationset
def _get_sample_video_adaptationset(self):
video_adaptationset = VideoAdaptationSet()
return video_adaptationset
def _get_sample_subtitle_adaptationset(self):
subtitle_adaptationset = SubtitleAdaptationSet(lang='en')
return subtitle_adaptationset
if __name__ == '__main__':
unittest.main()
|
unlicense
|
350cfd6b2cbd32908cb7e8fc54e6c955
| 49.132184
| 122
| 0.708586
| 4.140009
| false
| true
| false
| false
|
bitmovin/bitmovin-python
|
examples/encoding/live/live_to_vod.py
|
1
|
9125
|
import datetime
from bitmovin import Bitmovin, S3Output, EncodingOutput, ACLEntry, ACLPermission, DashManifest, FMP4Representation, \
FMP4RepresentationType, Period, VideoAdaptationSet, AudioAdaptationSet, HlsManifest, AudioMedia, VariantStream
# IMPORTANT: first run start_live_encoding_dash_hls.py to get the ids
API_KEY = '<YOUR_API_KEY>'
S3_OUTPUT_ACCESS_KEY = '<YOUR_S3_ACCESS_KEY>'
S3_OUTPUT_SECRET_KEY = '<YOUR_S3_SECRET_KEY>'
S3_OUTPUT_BUCKET_NAME = '<YOUR_S3_BUCKET_NAME>'
date_component = str(datetime.datetime.now()).replace(' ', '_').replace(':', '-').split('.')[0].replace('_', '__')
OUTPUT_BASE_PATH = 'your/output/base/path/{}/'.format(date_component)
# INPUT INFORMATION FROM LIVE STREAM
ENCODING_ID = 'COPY AND PASTE'
STREAM_1080P_ID = 'COPY AND PASTE'
STREAM_720P_ID = 'COPY AND PASTE'
STREAM_480P_ID = 'COPY AND PASTE'
STREAM_360P_ID = 'COPY AND PASTE'
STREAM_240P_ID = 'COPY AND PASTE'
STREAM_AUDIO_ID = 'COPY AND PASTE'
FMP4_MUXING_1080P_ID = 'COPY AND PASTE'
FMP4_MUXING_720P_ID = 'COPY AND PASTE'
FMP4_MUXING_480P_ID = 'COPY AND PASTE'
FMP4_MUXING_360P_ID = 'COPY AND PASTE'
FMP4_MUXING_240P_ID = 'COPY AND PASTE'
FMP4_MUXING_AUDIO_ID = 'COPY AND PASTE'
TS_MUXING_1080P_ID = 'COPY AND PASTE'
TS_MUXING_720P_ID = 'COPY AND PASTE'
TS_MUXING_480P_ID = 'COPY AND PASTE'
TS_MUXING_360P_ID = 'COPY AND PASTE'
TS_MUXING_240P_ID = 'COPY AND PASTE'
TS_MUXING_AUDIO_ID = 'COPY AND PASTE'
# Set the start segment number. If set to None, the first segment is used as the start segment.
START_SEGMENT = None
# Set the end segment number. If set to None, the last segment is used as the end segment.
END_SEGMENT = None
AUDIO_REPRESENTATIONS = [
{
'fmp4_muxing_id': FMP4_MUXING_AUDIO_ID,
'ts_muxing_id': TS_MUXING_AUDIO_ID,
'stream_id': STREAM_AUDIO_ID,
'dash_segment_path': 'audio/128kbps/dash',
'hls_segment_path': 'audio/128kbps/hls',
'hls_variant_uri': 'audio_vod.m3u8'
},
]
VIDEO_REPRESENTATIONS = [
{
'fmp4_muxing_id': FMP4_MUXING_1080P_ID,
'ts_muxing_id': TS_MUXING_1080P_ID,
'stream_id': STREAM_1080P_ID,
'dash_segment_path': 'video/1080p/dash',
'hls_segment_path': 'video/1080p/hls',
'hls_variant_uri': 'video_1080p_vod.m3u8'
},
{
'fmp4_muxing_id': FMP4_MUXING_720P_ID,
'ts_muxing_id': TS_MUXING_720P_ID,
'stream_id': STREAM_720P_ID,
'dash_segment_path': 'video/720p/dash',
'hls_segment_path': 'video/720p/hls',
'hls_variant_uri': 'video_720p_vod.m3u8'
},
{
'fmp4_muxing_id': FMP4_MUXING_480P_ID,
'ts_muxing_id': TS_MUXING_480P_ID,
'stream_id': STREAM_480P_ID,
'dash_segment_path': 'video/480p/dash',
'hls_segment_path': 'video/480p/hls',
'hls_variant_uri': 'video_480p_vod.m3u8'
},
{
'fmp4_muxing_id': FMP4_MUXING_360P_ID,
'ts_muxing_id': TS_MUXING_360P_ID,
'stream_id': STREAM_360P_ID,
'dash_segment_path': 'video/360p/dash',
'hls_segment_path': 'video/360p/hls',
'hls_variant_uri': 'video_360p_vod.m3u8'
},
{
'fmp4_muxing_id': FMP4_MUXING_240P_ID,
'ts_muxing_id': TS_MUXING_240P_ID,
'stream_id': STREAM_240P_ID,
'dash_segment_path': 'video/240p/dash',
'hls_segment_path': 'video/240p/hls',
'hls_variant_uri': 'video_240p_vod.m3u8'
},
]
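# Each entry in the two lists above ties one rendition of the finished live
# encoding (its stream plus fMP4 and TS muxings) to the segment paths and the
# variant playlist URI that the VoD manifests created below will reference.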
def main():
bitmovin = Bitmovin(api_key=API_KEY)
output = S3Output(access_key=S3_OUTPUT_ACCESS_KEY,
secret_key=S3_OUTPUT_SECRET_KEY,
bucket_name=S3_OUTPUT_BUCKET_NAME,
name='Sample S3 Output')
output = bitmovin.outputs.S3.create(output).resource
manifest_output = EncodingOutput(output_id=output.id,
output_path=OUTPUT_BASE_PATH,
acl=[ACLEntry(permission=ACLPermission.PUBLIC_READ)])
# Create a DASH manifest
dash_manifest = DashManifest(manifest_name='stream_vod.mpd',
outputs=[manifest_output],
name='stream_vod.mpd')
dash_manifest = bitmovin.manifests.DASH.create(dash_manifest).resource
period = Period()
period = bitmovin.manifests.DASH.add_period(object_=period, manifest_id=dash_manifest.id).resource
video_adaptation_set = VideoAdaptationSet()
video_adaptation_set = bitmovin.manifests.DASH.add_video_adaptation_set(object_=video_adaptation_set,
manifest_id=dash_manifest.id,
period_id=period.id).resource
audio_adaptation_set = AudioAdaptationSet(lang='en')
audio_adaptation_set = bitmovin.manifests.DASH.add_audio_adaptation_set(object_=audio_adaptation_set,
manifest_id=dash_manifest.id,
period_id=period.id).resource
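    # Add one fMP4 representation per rendition to the DASH adaptation sets. The
    # START_SEGMENT/END_SEGMENT bounds restrict the VoD manifest to that window of
    # the recorded live segments (None means from the first, or to the last, segment).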
for representation in AUDIO_REPRESENTATIONS:
audio_representation = FMP4Representation(type=FMP4RepresentationType.TEMPLATE,
encoding_id=ENCODING_ID,
muxing_id=representation['fmp4_muxing_id'],
segment_path=representation['dash_segment_path'],
start_segment_number=START_SEGMENT,
end_segment_number=END_SEGMENT)
bitmovin.manifests.DASH.add_fmp4_representation(object_=audio_representation,
manifest_id=dash_manifest.id,
period_id=period.id,
adaptationset_id=audio_adaptation_set.id)
for representation in VIDEO_REPRESENTATIONS:
video_representation = FMP4Representation(type=FMP4RepresentationType.TEMPLATE,
encoding_id=ENCODING_ID,
muxing_id=representation['fmp4_muxing_id'],
segment_path=representation['dash_segment_path'],
start_segment_number=START_SEGMENT,
end_segment_number=END_SEGMENT)
bitmovin.manifests.DASH.add_fmp4_representation(object_=video_representation,
manifest_id=dash_manifest.id,
period_id=period.id,
adaptationset_id=video_adaptation_set.id)
# Create a HLS manifest
hls_manifest = HlsManifest(manifest_name='stream_vod.m3u8',
outputs=[manifest_output],
name='stream_vod.m3u8')
hls_manifest = bitmovin.manifests.HLS.create(object_=hls_manifest).resource
for representation in AUDIO_REPRESENTATIONS:
hls_audio_media = AudioMedia(name='en', group_id='audio_group',
segment_path=representation['hls_segment_path'],
encoding_id=ENCODING_ID,
stream_id=representation['stream_id'],
muxing_id=representation['ts_muxing_id'],
language='en',
uri=representation['hls_variant_uri'],
start_segment_number=START_SEGMENT,
end_segment_number=END_SEGMENT)
bitmovin.manifests.HLS.AudioMedia.create(manifest_id=hls_manifest.id,
object_=hls_audio_media)
for representation in VIDEO_REPRESENTATIONS:
hls_variant_stream = VariantStream(audio="audio_group",
segment_path=representation['hls_segment_path'],
encoding_id=ENCODING_ID,
stream_id=representation['stream_id'],
muxing_id=representation['ts_muxing_id'],
uri=representation['hls_variant_uri'],
start_segment_number=START_SEGMENT,
end_segment_number=END_SEGMENT)
bitmovin.manifests.HLS.VariantStream.create(manifest_id=hls_manifest.id,
object_=hls_variant_stream)
# start manifest creation.
bitmovin.manifests.DASH.start(manifest_id=dash_manifest.id)
bitmovin.manifests.HLS.start(manifest_id=hls_manifest.id)
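    # Manifest generation runs asynchronously after start(); a production script
    # would poll the manifest status until it reports FINISHED before publishing
    # the URLs (the polling call is omitted here, and its exact service method is
    # not shown in this example).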
if __name__ == '__main__':
main()
|
unlicense
|
e27db030f4584127fe6cfce88172e6f1
| 45.319797
| 117
| 0.540164
| 3.672032
| false
| false
| false
| false
|
bitmovin/bitmovin-python
|
bitmovin/resources/models/filters/scale_filter.py
|
1
|
1998
|
from bitmovin.utils import Serializable
from bitmovin.errors import InvalidTypeError
from bitmovin.resources.enums import ScalingAlgorithm
from . import AbstractFilter
class ScaleFilter(AbstractFilter, Serializable):
def __init__(self, name=None, width=None, height=None, scaling_algorithm=None, id_=None, custom_data=None,
description=None):
super().__init__(id_=id_, custom_data=custom_data, name=name, description=description)
self.width = width
self.height = height
self._scaling_algorithm = None
self.scalingAlgorithm = scaling_algorithm
@property
def scalingAlgorithm(self):
return self._scaling_algorithm
@scalingAlgorithm.setter
def scalingAlgorithm(self, new_value):
if new_value is None:
self._scaling_algorithm = None
return
if isinstance(new_value, str):
self._scaling_algorithm = new_value
elif isinstance(new_value, ScalingAlgorithm):
self._scaling_algorithm = new_value.value
else:
            raise InvalidTypeError(
                'Invalid type {} for scalingAlgorithm: '
                'must be either str or ScalingAlgorithm!'.format(type(new_value)))
def serialize(self):
serialized = super().serialize()
serialized['scalingAlgorithm'] = self.scalingAlgorithm
return serialized
@classmethod
def parse_from_json_object(cls, json_object):
id_ = json_object['id']
scaling_algorithm = json_object.get('scalingAlgorithm')
width = json_object.get('width')
height = json_object.get('height')
name = json_object.get('name')
description = json_object.get('description')
scale_filter = ScaleFilter(
name=name,
width=width,
height=height,
scaling_algorithm=scaling_algorithm,
id_=id_,
description=description
)
return scale_filter
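# Usage sketch (hedged): a minimal construction of the filter defined above; the
# enum member name is an assumption, check ScalingAlgorithm for the real values.
#
#   scale = ScaleFilter(width=1280, height=720,
#                       scaling_algorithm=ScalingAlgorithm.BICUBIC,  # assumed member name
#                       name='Sample Scale Filter')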
|
unlicense
|
31577e029af806fb9d6a7631a38b635b
| 33.448276
| 110
| 0.629129
| 4.5
| false
| false
| false
| false
|
bitmovin/bitmovin-python
|
bitmovin/resources/models/manifests/hls/vtt_media.py
|
1
|
1338
|
from .abstract_media import AbstractMedia
class VttMedia(AbstractMedia):
def __init__(self, name, group_id, vtt_url, language=None, assoc_language=None, is_default=None, autoselect=None,
characteristics=None, id_=None, uri=None):
super().__init__(id_=id_, name=name, group_id=group_id, language=language, assoc_language=assoc_language,
is_default=is_default, autoselect=autoselect, characteristics=characteristics)
self.vttUrl = vtt_url
self.uri = uri
@classmethod
def parse_from_json_object(cls, json_object):
media = super().parse_from_json_object(json_object=json_object)
id_ = media.id
name = media.name
group_id = media.groupId
language = media.language
assoc_language = media.assocLanguage
is_default = media.isDefault
autoselect = media.autoselect
characteristics = media.characteristics
vtt_url = json_object.get('vttUrl')
uri = json_object.get('uri')
vtt_media = VttMedia(id_=id_, name=name, group_id=group_id, language=language, assoc_language=assoc_language,
is_default=is_default, autoselect=autoselect, characteristics=characteristics,
vtt_url=vtt_url, uri=uri)
return vtt_media
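# Usage sketch (hedged): references an externally hosted WebVTT subtitle file from
# an HLS manifest; the URL below is a placeholder.
#
#   subtitles = VttMedia(name='subs', group_id='subs_group',
#                        vtt_url='https://example.com/subtitles.vtt', language='en')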
|
unlicense
|
9d4d51bd1ad7bd5456b0b9db2815ca54
| 42.16129
| 117
| 0.633782
| 3.758427
| false
| false
| false
| false
|
bitmovin/bitmovin-python
|
tests/bitmovin/services/encodings/stream_tests.py
|
1
|
13418
|
import unittest
import uuid
import json
from bitmovin import Bitmovin, Response, Stream, StreamInput, StreamMetadata, EncodingOutput, ACLEntry, Encoding, \
ACLPermission, SelectionMode, StreamMode, StreamDecodingErrorMode
from bitmovin.errors import BitmovinApiError
from bitmovin.resources.models.encodings.conditions import AndConjunction, OrConjunction, Condition
from tests.bitmovin import BitmovinTestCase
class EncodingStreamTests(BitmovinTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
def setUp(self):
super().setUp()
self.bitmovin = Bitmovin(self.api_key)
self.assertIsNotNone(self.bitmovin)
self.assertTrue(isinstance(self.bitmovin, Bitmovin))
self.sampleEncoding = self._create_sample_encoding() # type: Encoding
def tearDown(self):
super().tearDown()
def test_create_stream(self):
sample_stream = self._get_sample_stream()
stream_resource_response = self.bitmovin.encodings.Stream.create(object_=sample_stream,
encoding_id=self.sampleEncoding.id)
self.assertIsNotNone(stream_resource_response)
self.assertIsNotNone(stream_resource_response.resource)
self.assertIsNotNone(stream_resource_response.resource.id)
self._compare_streams(sample_stream, stream_resource_response.resource)
def test_create_stream_without_name(self):
sample_stream = self._get_sample_stream()
sample_stream.name = None
stream_resource_response = self.bitmovin.encodings.Stream.create(object_=sample_stream,
encoding_id=self.sampleEncoding.id)
self.assertIsNotNone(stream_resource_response)
self.assertIsNotNone(stream_resource_response.resource)
self.assertIsNotNone(stream_resource_response.resource.id)
self._compare_streams(sample_stream, stream_resource_response.resource)
def test_create_stream_with_metadata(self):
sample_stream = self._get_sample_stream()
sample_stream.name = None
stream_metadata = StreamMetadata(language='eng')
sample_stream.metadata = stream_metadata
self.assertIsNotNone(sample_stream.metadata)
stream_resource_response = self.bitmovin.encodings.Stream.create(object_=sample_stream,
encoding_id=self.sampleEncoding.id)
self.assertIsNotNone(stream_resource_response)
self.assertIsNotNone(stream_resource_response.resource)
self.assertIsNotNone(stream_resource_response.resource.id)
self._compare_streams(sample_stream, stream_resource_response.resource)
def test_create_stream_per_title_fixed_resolution(self):
sample_stream = self._get_sample_stream()
sample_stream.mode = StreamMode.PER_TITLE_TEMPLATE_FIXED_RESOLUTION
stream_resource_response = self.bitmovin.encodings.Stream.create(object_=sample_stream,
encoding_id=self.sampleEncoding.id)
self.assertIsNotNone(stream_resource_response)
self.assertIsNotNone(stream_resource_response.resource)
self.assertIsNotNone(stream_resource_response.resource.id)
self._compare_streams(sample_stream, stream_resource_response.resource)
self.assertEqual(StreamMode.PER_TITLE_TEMPLATE_FIXED_RESOLUTION.value, stream_resource_response.resource.mode)
def test_retrieve_stream(self):
sample_stream = self._get_sample_stream()
created_stream_response = self.bitmovin.encodings.Stream.create(object_=sample_stream,
encoding_id=self.sampleEncoding.id)
self.assertIsNotNone(created_stream_response)
self.assertIsNotNone(created_stream_response.resource)
self.assertIsNotNone(created_stream_response.resource.id)
self._compare_streams(sample_stream, created_stream_response.resource)
retrieved_stream_response = self.bitmovin.encodings.Stream.retrieve(
stream_id=created_stream_response.resource.id,
encoding_id=self.sampleEncoding.id
)
self.assertIsNotNone(retrieved_stream_response)
self.assertIsNotNone(retrieved_stream_response.resource)
self._compare_streams(created_stream_response.resource, retrieved_stream_response.resource)
@unittest.skip('Currently there is no route for stream deletion')
def test_delete_stream(self):
sample_stream = self._get_sample_stream()
created_stream_response = self.bitmovin.encodings.Stream.create(object_=sample_stream,
encoding_id=self.sampleEncoding.id)
self.assertIsNotNone(created_stream_response)
self.assertIsNotNone(created_stream_response.resource)
self.assertIsNotNone(created_stream_response.resource.id)
self._compare_streams(sample_stream, created_stream_response.resource)
deleted_minimal_resource = self.bitmovin.encodings.Stream.delete(stream_id=created_stream_response.resource.id,
encoding_id=self.sampleEncoding.id)
self.assertIsNotNone(deleted_minimal_resource)
self.assertIsNotNone(deleted_minimal_resource.resource)
self.assertIsNotNone(deleted_minimal_resource.resource.id)
try:
self.bitmovin.encodings.Stream.retrieve(created_stream_response.resource.id)
self.fail(
'Previous statement should have thrown an exception. ' +
'Retrieving stream after deleting it shouldn\'t be possible.'
)
except BitmovinApiError:
pass
def test_list_streams(self):
sample_stream = self._get_sample_stream()
created_stream_response = self.bitmovin.encodings.Stream.create(object_=sample_stream,
encoding_id=self.sampleEncoding.id)
self.assertIsNotNone(created_stream_response)
self.assertIsNotNone(created_stream_response.resource)
self.assertIsNotNone(created_stream_response.resource.id)
self._compare_streams(sample_stream, created_stream_response.resource)
streams = self.bitmovin.encodings.Stream.list(encoding_id=self.sampleEncoding.id)
self.assertIsNotNone(streams)
self.assertIsNotNone(streams.resource)
self.assertIsNotNone(streams.response)
self.assertIsInstance(streams.resource, list)
self.assertIsInstance(streams.response, Response)
        self.assertGreater(len(streams.resource), 0)
def test_retrieve_stream_custom_data(self):
sample_stream = self._get_sample_stream()
sample_stream.customData = '<pre>my custom data</pre>'
created_stream_response = self.bitmovin.encodings.Stream.create(object_=sample_stream,
encoding_id=self.sampleEncoding.id)
self.assertIsNotNone(created_stream_response)
self.assertIsNotNone(created_stream_response.resource)
self.assertIsNotNone(created_stream_response.resource.id)
self._compare_streams(sample_stream, created_stream_response.resource)
custom_data_response = self.bitmovin.encodings.Stream.retrieve_custom_data(
stream_id=created_stream_response.resource.id,
encoding_id=self.sampleEncoding.id)
custom_data = custom_data_response.resource
self.assertEqual(sample_stream.customData, json.loads(custom_data.customData))
def _compare_streams(self, first: Stream, second: Stream):
"""
:param first: Stream
:param second: Stream
:return: bool
"""
self.assertEqual(first.codecConfigId, second.codecConfigId)
self.assertEqual(first.name, second.name)
self.assertEqual(first.description, second.description)
self.assertEqual(first.mode, second.mode)
if first.inputStreams:
self.assertEqual(len(first.inputStreams), len(second.inputStreams))
if first.outputs:
self.assertEqual(len(first.outputs), len(second.outputs))
if first.conditions:
self._assertEqualConditions(first.conditions, second.conditions)
if first.metadata:
self._assertEqualConditions(first.metadata, second.metadata)
return True
def _get_sample_stream(self):
sample_codec_configuration = self.utils.get_sample_h264_codec_configuration()
h264_codec_configuration = self.bitmovin.codecConfigurations.H264.create(sample_codec_configuration)
conditions = self._get_sample_conditions()
(sample_input, sample_files) = self.utils.get_sample_s3_input()
s3_input = self.bitmovin.inputs.S3.create(sample_input)
stream_input = StreamInput(input_id=s3_input.resource.id,
input_path=sample_files.get('854b9c98-17b9-49ed-b75c-3b912730bfd1'),
selection_mode=SelectionMode.AUTO)
acl_entry = ACLEntry(scope='string', permission=ACLPermission.PUBLIC_READ)
sample_output = self.utils.get_sample_s3_output()
s3_output = self.bitmovin.outputs.S3.create(sample_output)
encoding_output = EncodingOutput(output_id=s3_output.resource.id,
output_path='/bitmovin-python/StreamTests/'+str(uuid.uuid4()),
acl=[acl_entry])
stream = Stream(codec_configuration_id=h264_codec_configuration.resource.id,
input_streams=[stream_input],
outputs=[encoding_output],
name='Sample Stream',
conditions=conditions,
mode=StreamMode.PER_TITLE_TEMPLATE,
decoding_error_mode=StreamDecodingErrorMode.FAIL_ON_ERROR)
self.assertEqual(stream.codecConfigId, h264_codec_configuration.resource.id)
self.assertEqual(stream.inputStreams, [stream_input])
self.assertEqual(stream.outputs, [encoding_output])
self.assertEqual(stream.conditions, conditions)
self.assertEqual(stream.mode, StreamMode.PER_TITLE_TEMPLATE.value)
self.assertEqual(stream.decodingErrorMode, StreamDecodingErrorMode.FAIL_ON_ERROR.value)
return stream
def _get_sample_conditions(self):
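        # Builds the condition tree (BITRATE != 2000000 OR FPS == 24) AND HEIGHT <= 400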
bitrate_condition = Condition(attribute='BITRATE', operator='!=', value='2000000')
fps_condition = Condition(attribute='FPS', operator='==', value='24')
or_conjunctions = [bitrate_condition, fps_condition]
sub_condition_or = OrConjunction(conditions=or_conjunctions)
height_condition_condition = Condition(attribute='HEIGHT', operator='<=', value='400')
and_conditions = [sub_condition_or, height_condition_condition]
and_conjunction = AndConjunction(conditions=and_conditions)
return and_conjunction
def _create_sample_encoding(self):
sample_encoding = self.utils.get_sample_encoding()
resource_response = self.bitmovin.encodings.Encoding.create(sample_encoding)
return resource_response.resource
def _assertEqualConditions(self, first, second):
if first is None and second is None:
return True
if first is not None and second is None:
raise self.failureException('second condition is none but not first')
if first is None and second is not None:
raise self.failureException('first condition is none but not second')
if isinstance(first, Condition):
if isinstance(second, Condition):
if first.attribute != second.attribute:
raise self.failureException('first.attribute is {}, second.attribute is {}'.format(
first.attribute, second.attribute))
if first.operator != second.operator:
raise self.failureException('first.operator is {}, second.operator is {}'.format(
first.operator, second.operator))
if first.value != second.value:
raise self.failureException('first.value is {}, second.value is {}'.format(
first.value, second.value))
else:
raise self.failureException('first is {}, second is {}'.format(type(first), type(second)))
if isinstance(first, OrConjunction):
if isinstance(second, OrConjunction):
self.assertEqual(len(first.conditions), len(second.conditions))
else:
raise self.failureException('first is {}, second is {}'.format(type(first), type(second)))
if isinstance(first, StreamMetadata):
if isinstance(second, StreamMetadata):
self.assertEqual(first.language, second.language)
else:
raise self.failureException('first is {}, second is {}'.format(type(first), type(second)))
if __name__ == '__main__':
unittest.main()
|
unlicense
|
66af6e0e4c8a13bf14ed7407a3f09f36
| 49.443609
| 119
| 0.650395
| 4.478638
| false
| true
| false
| false
|
bitmovin/bitmovin-python
|
tests/bitmovin/services/encodings/encoding_tests.py
|
1
|
9567
|
import unittest
import json
from bitmovin import Bitmovin, Response, Encoding, CloudRegion
from bitmovin.errors import BitmovinApiError
from bitmovin.resources.enums.encoding_status_values import EncodingStatusValues
from tests.bitmovin import BitmovinTestCase
class EncodingTests(BitmovinTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
def setUp(self):
super().setUp()
self.bitmovin = Bitmovin(self.api_key)
self.assertIsNotNone(self.bitmovin)
self.assertTrue(isinstance(self.bitmovin, Bitmovin))
def tearDown(self):
super().tearDown()
def test_create_encoding(self):
sample_encoding = self._get_sample_encoding()
encoding_resource_response = self.bitmovin.encodings.Encoding.create(sample_encoding)
self.assertIsNotNone(encoding_resource_response)
self.assertIsNotNone(encoding_resource_response.resource)
self.assertIsNotNone(encoding_resource_response.resource.id)
self._compare_encodings(sample_encoding, encoding_resource_response.resource)
def test_create_encoding_with_infrastructure(self):
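        # Hedged background note: CloudRegion.EXTERNAL together with an infrastructure
        # object points the encoding at externally managed (cloud-connect) infrastructure
        # rather than a Bitmovin-managed region; the test only verifies the values are
        # echoed back by the API.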
sample_infrastructure = self.utils.get_sample_infrastructure()
self.assertIsNotNone(sample_infrastructure.cloudRegion)
self.assertIsNotNone(sample_infrastructure.infrastructureId)
sample_encoding = self._get_sample_encoding(CloudRegion.EXTERNAL, infrastructure=sample_infrastructure)
encoding_resource_response = self.bitmovin.encodings.Encoding.create(sample_encoding)
self.assertIsNotNone(encoding_resource_response)
self.assertIsNotNone(encoding_resource_response.resource)
self.assertIsNotNone(encoding_resource_response.resource.id)
self._compare_encodings(sample_encoding, encoding_resource_response.resource)
infrastructure_response = encoding_resource_response.resource.infrastructure
self.assertIsNotNone(infrastructure_response)
self.assertIsNotNone(infrastructure_response.infrastructureId)
self.assertIsNotNone(infrastructure_response.cloudRegion)
self.assertEqual(sample_infrastructure.cloudRegion, infrastructure_response.cloudRegion)
self.assertEqual(sample_infrastructure.infrastructureId, infrastructure_response.infrastructureId)
def test_retrieve_encoding(self):
sample_encoding = self._get_sample_encoding()
created_encoding_response = self.bitmovin.encodings.Encoding.create(sample_encoding)
self.assertIsNotNone(created_encoding_response)
self.assertIsNotNone(created_encoding_response.resource)
self.assertIsNotNone(created_encoding_response.resource.id)
self._compare_encodings(sample_encoding, created_encoding_response.resource)
retrieved_encoding_response = self.bitmovin.encodings.Encoding.retrieve(created_encoding_response.resource.id)
self.assertIsNotNone(retrieved_encoding_response)
self.assertIsNotNone(retrieved_encoding_response.resource)
self._compare_encodings(created_encoding_response.resource, retrieved_encoding_response.resource)
def test_delete_encoding(self):
sample_encoding = self._get_sample_encoding()
created_encoding_response = self.bitmovin.encodings.Encoding.create(sample_encoding)
self.assertIsNotNone(created_encoding_response)
self.assertIsNotNone(created_encoding_response.resource)
self.assertIsNotNone(created_encoding_response.resource.id)
self._compare_encodings(sample_encoding, created_encoding_response.resource)
deleted_minimal_resource = self.bitmovin.encodings.Encoding.delete(created_encoding_response.resource.id)
self.assertIsNotNone(deleted_minimal_resource)
self.assertIsNotNone(deleted_minimal_resource.resource)
self.assertIsNotNone(deleted_minimal_resource.resource.id)
try:
self.bitmovin.encodings.Encoding.retrieve(created_encoding_response.resource.id)
self.fail(
'Previous statement should have thrown an exception. ' +
'Retrieving encoding after deleting it shouldn\'t be possible.'
)
except BitmovinApiError:
pass
def test_get_encoding_status(self):
sample_encoding = self._get_sample_encoding()
created_encoding_response = self.bitmovin.encodings.Encoding.create(sample_encoding)
self.assertIsNotNone(created_encoding_response)
self.assertIsNotNone(created_encoding_response.resource)
self.assertIsNotNone(created_encoding_response.resource.id)
self._compare_encodings(sample_encoding, created_encoding_response.resource)
encoding_status = self.bitmovin.encodings.Encoding.status(created_encoding_response.resource.id)
self.assertIsNotNone(encoding_status)
self.assertEqual(encoding_status.resource.status, EncodingStatusValues.CREATED.value)
def test_list_encodings(self):
sample_encoding = self._get_sample_encoding()
created_encoding_response = self.bitmovin.encodings.Encoding.create(sample_encoding)
self.assertIsNotNone(created_encoding_response)
self.assertIsNotNone(created_encoding_response.resource)
self.assertIsNotNone(created_encoding_response.resource.id)
self._compare_encodings(sample_encoding, created_encoding_response.resource)
encodings = self.bitmovin.encodings.Encoding.list()
self.assertIsNotNone(encodings)
self.assertIsNotNone(encodings.resource)
self.assertIsNotNone(encodings.response)
self.assertIsInstance(encodings.resource, list)
self.assertIsInstance(encodings.response, Response)
        self.assertGreater(len(encodings.resource), 0)
def test_get_encodings_by_status(self):
sample_encoding = self._get_sample_encoding()
created_encoding_response = self.bitmovin.encodings.Encoding.create(sample_encoding)
self.assertIsNotNone(created_encoding_response)
self.assertIsNotNone(created_encoding_response.resource)
self.assertIsNotNone(created_encoding_response.resource.id)
self._compare_encodings(sample_encoding, created_encoding_response.resource)
encodings = self.bitmovin.encodings.Encoding.filter_by_status(status=EncodingStatusValues.CREATED)
self.assertIsNotNone(encodings)
self.assertIsNotNone(encodings.resource)
self.assertIsNotNone(encodings.response)
self.assertIsInstance(encodings.resource, list)
self.assertIsInstance(encodings.response, Response)
        self.assertGreater(len(encodings.resource), 0)
@unittest.skip("not yet implemented in bitmovin-python")
def test_retrieve_encoding_live_details(self):
sample_encoding = self._get_sample_encoding()
created_encoding_response = self.bitmovin.encodings.Encoding.create(sample_encoding)
self.assertIsNotNone(created_encoding_response)
self.assertIsNotNone(created_encoding_response.resource)
self.assertIsNotNone(created_encoding_response.resource.id)
self._compare_encodings(sample_encoding, created_encoding_response.resource)
retrieved_encoding_live_details_response = \
self.bitmovin.encodings.Encoding.retrieve_live(created_encoding_response.resource.id)
def test_retrieve_encoding_custom_data(self):
sample_encoding = self._get_sample_encoding()
sample_encoding.customData = '<pre>my custom data</pre>'
created_encoding_response = self.bitmovin.encodings.Encoding.create(sample_encoding)
self.assertIsNotNone(created_encoding_response)
self.assertIsNotNone(created_encoding_response.resource)
self.assertIsNotNone(created_encoding_response.resource.id)
self._compare_encodings(sample_encoding, created_encoding_response.resource)
custom_data_response = self.bitmovin.encodings.Encoding.retrieve_custom_data(
id_=created_encoding_response.resource.id)
custom_data = custom_data_response.resource
self.assertEqual(sample_encoding.customData, json.loads(custom_data.customData))
def _compare_encodings(self, first: Encoding, second: Encoding):
"""
:param first: Encoding
:param second: Encoding
:return: bool
"""
self.assertEqual(first.name, second.name)
self.assertEqual(first.description, second.description)
self.assertEqual(first.encoderVersion, second.encoderVersion)
self.assertEqual(first.cloudRegion, second.cloudRegion)
self.assertCountEqual(first.labels, second.labels)
return True
def _get_sample_encoding(self, cloud_region=CloudRegion.GOOGLE_EUROPE_WEST_1, infrastructure=None):
encoding = Encoding(name='Sample Encoding bitmovin-python',
description='Sample encoding used in bitmovin-python API client tests',
cloud_region=cloud_region,
infrastructure=infrastructure,
labels=['label1', 'label2'])
self.assertIsNotNone(encoding.name)
self.assertIsNotNone(encoding.description)
self.assertIsNotNone(encoding.cloudRegion)
self.assertIsNotNone(encoding.labels)
self.assertIsInstance(encoding.labels, list)
self.assertIs(len(encoding.labels), 2)
return encoding
if __name__ == '__main__':
unittest.main()
|
unlicense
|
a532c478a8b86ac4d07393949f33597d
| 48.828125
| 118
| 0.726351
| 4.474743
| false
| true
| false
| false
|
bitmovin/bitmovin-python
|
bitmovin/resources/models/encodings/id3/id3_tag.py
|
1
|
2031
|
from bitmovin.errors import InvalidTypeError
from bitmovin.resources.models import AbstractModel
from bitmovin.resources import AbstractNameDescriptionResource
from bitmovin.utils import Serializable
from bitmovin.resources import ID3TagPositionMode
class ID3Tag(AbstractNameDescriptionResource, AbstractModel, Serializable):
def __init__(self, position_mode, time=None, frame=None, id_=None, custom_data=None, name=None, description=None):
super().__init__(id_=id_, custom_data=custom_data, name=name, description=description)
self._position_mode = None
self.positionMode = position_mode
self.time = time
self.frame = frame
@property
def positionMode(self):
return self._position_mode
@positionMode.setter
def positionMode(self, new_position_mode):
if new_position_mode is None:
return
if isinstance(new_position_mode, str):
self._position_mode = new_position_mode
elif isinstance(new_position_mode, ID3TagPositionMode):
self._position_mode = new_position_mode.value
else:
raise InvalidTypeError(
'Invalid type {} for position_mode: must be either str or ID3TagPositionMode!'.format(
type(new_position_mode)))
@classmethod
def parse_from_json_object(cls, json_object):
id_ = json_object.get('id')
custom_data = json_object.get('customData')
name = json_object.get('name')
description = json_object.get('description')
position_mode = json_object.get('positionMode')
time = json_object.get('time')
frame = json_object.get('frame')
id3_tag = ID3Tag(id_=id_, custom_data=custom_data, name=name, description=description,
position_mode=position_mode, time=time, frame=frame)
return id3_tag
def serialize(self):
serialized = super().serialize()
serialized['positionMode'] = self.positionMode
return serialized
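# Usage sketch (hedged): depending on positionMode, either `time` (time-based
# positioning) or `frame` (frame-based positioning) is the relevant field. The
# enum member name below is an assumption; check ID3TagPositionMode for the
# actual values, and note that concrete tag types in the API typically extend
# this class with their payload.
#
#   tag = ID3Tag(position_mode=ID3TagPositionMode.TIME,  # assumed member name
#                time=5.0, name='Sample ID3 Tag')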
|
unlicense
|
275ee7d754d9f15ed05bf9f19191db98
| 37.320755
| 118
| 0.661251
| 4.128049
| false
| false
| false
| false
|
bitmovin/bitmovin-python
|
tests/bitmovin/services/manifests/dash/representation_tests.py
|
1
|
28339
|
import unittest
import uuid
from bitmovin import Bitmovin, DashManifest, ACLEntry, ACLPermission, EncodingOutput, Period, VideoAdaptationSet, \
AbstractAdaptationSet, FMP4Representation, FMP4RepresentationType, DRMFMP4Representation, Encoding, \
Stream, StreamInput, MuxingStream, FMP4Muxing, MarlinDRM, AbstractFMP4Representation, WebMRepresentation, \
WebMRepresentationType, DashMP4Representation, SubtitleAdaptationSet, VttRepresentation
from tests.bitmovin import BitmovinTestCase
class RepresentationTests(BitmovinTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
def setUp(self):
super().setUp()
self.bitmovin = Bitmovin(self.api_key)
self.assertIsNotNone(self.bitmovin)
self.assertTrue(isinstance(self.bitmovin, Bitmovin))
self.sampleEncoding = self._create_sample_encoding() # type: Encoding
self.sampleMuxing = self._create_sample_muxing() # type: FMP4Muxing
self.sampleDrm = self._create_sample_drm() # type: MarlinDRM
def tearDown(self):
super().tearDown()
def test_add_fmp4_representation(self):
sample_manifest = self._get_sample_manifest()
manifest_resource_response = self.bitmovin.manifests.DASH.create(sample_manifest)
self.assertIsNotNone(manifest_resource_response)
self.assertIsNotNone(manifest_resource_response.resource)
self.assertIsNotNone(manifest_resource_response.resource.id)
self._compare_manifests(sample_manifest, manifest_resource_response.resource)
sample_period = self._get_sample_period_default()
period_resource_response = self.bitmovin.manifests.DASH.add_period(
object_=sample_period, manifest_id=manifest_resource_response.resource.id)
self.assertIsNotNone(period_resource_response)
self.assertIsNotNone(period_resource_response.resource)
self.assertIsNotNone(period_resource_response.resource.id)
self._compare_periods(sample_period, period_resource_response.resource)
sample_adaptationset = self._get_sample_adaptationset()
adaptationset_resource_response = self.bitmovin.manifests.DASH.add_video_adaptation_set(
object_=sample_adaptationset, manifest_id=manifest_resource_response.resource.id,
period_id=period_resource_response.resource.id
)
self.assertIsNotNone(adaptationset_resource_response)
self.assertIsNotNone(adaptationset_resource_response.resource)
self.assertIsNotNone(adaptationset_resource_response.resource.id)
self._compare_video_adaptationsets(sample_adaptationset, adaptationset_resource_response.resource)
sample_representation = self._get_sample_fmp4_representation()
representation_resource_response = self.bitmovin.manifests.DASH.add_fmp4_representation(
object_=sample_representation, manifest_id=manifest_resource_response.resource.id,
period_id=period_resource_response.resource.id, adaptationset_id=adaptationset_resource_response.resource.id
)
self.assertIsNotNone(representation_resource_response)
self.assertIsNotNone(representation_resource_response.resource)
self.assertIsNotNone(representation_resource_response.resource.id)
self._compare_fmp4_representations(sample_representation, representation_resource_response.resource)
def test_add_fmp4_representation_with_keyframes(self):
sample_manifest = self._get_sample_manifest()
manifest_resource_response = self.bitmovin.manifests.DASH.create(sample_manifest)
self.assertIsNotNone(manifest_resource_response)
self.assertIsNotNone(manifest_resource_response.resource)
self.assertIsNotNone(manifest_resource_response.resource.id)
self._compare_manifests(sample_manifest, manifest_resource_response.resource)
sample_period = self._get_sample_period_default()
period_resource_response = self.bitmovin.manifests.DASH.add_period(
object_=sample_period, manifest_id=manifest_resource_response.resource.id)
self.assertIsNotNone(period_resource_response)
self.assertIsNotNone(period_resource_response.resource)
self.assertIsNotNone(period_resource_response.resource.id)
self._compare_periods(sample_period, period_resource_response.resource)
sample_adaptationset = self._get_sample_adaptationset()
adaptationset_resource_response = self.bitmovin.manifests.DASH.add_video_adaptation_set(
object_=sample_adaptationset, manifest_id=manifest_resource_response.resource.id,
period_id=period_resource_response.resource.id
)
self.assertIsNotNone(adaptationset_resource_response)
self.assertIsNotNone(adaptationset_resource_response.resource)
self.assertIsNotNone(adaptationset_resource_response.resource.id)
self._compare_video_adaptationsets(sample_adaptationset, adaptationset_resource_response.resource)
sample_representation = self._get_sample_fmp4_representation_with_keyframes()
representation_resource_response = self.bitmovin.manifests.DASH.add_fmp4_representation(
object_=sample_representation, manifest_id=manifest_resource_response.resource.id,
period_id=period_resource_response.resource.id, adaptationset_id=adaptationset_resource_response.resource.id
)
self.assertIsNotNone(representation_resource_response)
self.assertIsNotNone(representation_resource_response.resource)
self.assertIsNotNone(representation_resource_response.resource.id)
self._compare_fmp4_representations(sample_representation, representation_resource_response.resource)
def test_add_drm_fmp4_representation(self):
sample_manifest = self._get_sample_manifest()
manifest_resource_response = self.bitmovin.manifests.DASH.create(sample_manifest)
self.assertIsNotNone(manifest_resource_response)
self.assertIsNotNone(manifest_resource_response.resource)
self.assertIsNotNone(manifest_resource_response.resource.id)
self._compare_manifests(sample_manifest, manifest_resource_response.resource)
sample_period = self._get_sample_period_default()
period_resource_response = self.bitmovin.manifests.DASH.add_period(
object_=sample_period, manifest_id=manifest_resource_response.resource.id)
self.assertIsNotNone(period_resource_response)
self.assertIsNotNone(period_resource_response.resource)
self.assertIsNotNone(period_resource_response.resource.id)
self._compare_periods(sample_period, period_resource_response.resource)
sample_adaptationset = self._get_sample_adaptationset()
adaptationset_resource_response = self.bitmovin.manifests.DASH.add_video_adaptation_set(
object_=sample_adaptationset, manifest_id=manifest_resource_response.resource.id,
period_id=period_resource_response.resource.id
)
self.assertIsNotNone(adaptationset_resource_response)
self.assertIsNotNone(adaptationset_resource_response.resource)
self.assertIsNotNone(adaptationset_resource_response.resource.id)
self._compare_video_adaptationsets(sample_adaptationset, adaptationset_resource_response.resource)
sample_representation = self._get_sample_drm_fmp4_representation()
representation_resource_response = self.bitmovin.manifests.DASH.add_drm_fmp4_representation(
object_=sample_representation, manifest_id=manifest_resource_response.resource.id,
period_id=period_resource_response.resource.id, adaptationset_id=adaptationset_resource_response.resource.id
)
self.assertIsNotNone(representation_resource_response)
self.assertIsNotNone(representation_resource_response.resource)
self.assertIsNotNone(representation_resource_response.resource.id)
self._compare_drm_fmp4_representations(sample_representation, representation_resource_response.resource)
def test_add_webm_representation(self):
sample_manifest = self._get_sample_manifest()
manifest_resource_response = self.bitmovin.manifests.DASH.create(sample_manifest)
self.assertIsNotNone(manifest_resource_response)
self.assertIsNotNone(manifest_resource_response.resource)
self.assertIsNotNone(manifest_resource_response.resource.id)
self._compare_manifests(sample_manifest, manifest_resource_response.resource)
sample_period = self._get_sample_period_default()
period_resource_response = self.bitmovin.manifests.DASH.add_period(
object_=sample_period, manifest_id=manifest_resource_response.resource.id)
self.assertIsNotNone(period_resource_response)
self.assertIsNotNone(period_resource_response.resource)
self.assertIsNotNone(period_resource_response.resource.id)
self._compare_periods(sample_period, period_resource_response.resource)
sample_adaptationset = self._get_sample_adaptationset()
adaptationset_resource_response = self.bitmovin.manifests.DASH.add_video_adaptation_set(
object_=sample_adaptationset, manifest_id=manifest_resource_response.resource.id,
period_id=period_resource_response.resource.id
)
self.assertIsNotNone(adaptationset_resource_response)
self.assertIsNotNone(adaptationset_resource_response.resource)
self.assertIsNotNone(adaptationset_resource_response.resource.id)
self._compare_video_adaptationsets(sample_adaptationset, adaptationset_resource_response.resource)
sample_representation = self._get_sample_webm_representation()
representation_resource_response = self.bitmovin.manifests.DASH.add_webm_representation(
object_=sample_representation, manifest_id=manifest_resource_response.resource.id,
period_id=period_resource_response.resource.id, adaptationset_id=adaptationset_resource_response.resource.id
)
self.assertIsNotNone(representation_resource_response)
self.assertIsNotNone(representation_resource_response.resource)
self.assertIsNotNone(representation_resource_response.resource.id)
self._compare_webm_representations(sample_representation, representation_resource_response.resource)
def test_add_mp4_representation(self):
sample_manifest = self._get_sample_manifest()
manifest_resource_response = self.bitmovin.manifests.DASH.create(sample_manifest)
self.assertIsNotNone(manifest_resource_response)
self.assertIsNotNone(manifest_resource_response.resource)
self.assertIsNotNone(manifest_resource_response.resource.id)
self._compare_manifests(sample_manifest, manifest_resource_response.resource)
sample_period = self._get_sample_period_default()
period_resource_response = self.bitmovin.manifests.DASH.add_period(
object_=sample_period, manifest_id=manifest_resource_response.resource.id)
self.assertIsNotNone(period_resource_response)
self.assertIsNotNone(period_resource_response.resource)
self.assertIsNotNone(period_resource_response.resource.id)
self._compare_periods(sample_period, period_resource_response.resource)
sample_adaptationset = self._get_sample_adaptationset()
adaptationset_resource_response = self.bitmovin.manifests.DASH.add_video_adaptation_set(
object_=sample_adaptationset, manifest_id=manifest_resource_response.resource.id,
period_id=period_resource_response.resource.id
)
self.assertIsNotNone(adaptationset_resource_response)
self.assertIsNotNone(adaptationset_resource_response.resource)
self.assertIsNotNone(adaptationset_resource_response.resource.id)
self._compare_video_adaptationsets(sample_adaptationset, adaptationset_resource_response.resource)
sample_representation = self._get_sample_mp4_representation()
representation_resource_response = self.bitmovin.manifests.DASH.add_mp4_representation(
object_=sample_representation, manifest_id=manifest_resource_response.resource.id,
period_id=period_resource_response.resource.id, adaptationset_id=adaptationset_resource_response.resource.id
)
self.assertIsNotNone(representation_resource_response)
self.assertIsNotNone(representation_resource_response.resource)
self.assertIsNotNone(representation_resource_response.resource.id)
self._compare_mp4_representations(sample_representation, representation_resource_response.resource)
def test_add_vtt_representation(self):
sample_manifest = self._get_sample_manifest()
manifest_resource_response = self.bitmovin.manifests.DASH.create(sample_manifest)
self.assertIsNotNone(manifest_resource_response)
self.assertIsNotNone(manifest_resource_response.resource)
self.assertIsNotNone(manifest_resource_response.resource.id)
self._compare_manifests(sample_manifest, manifest_resource_response.resource)
sample_period = self._get_sample_period_default()
period_resource_response = self.bitmovin.manifests.DASH.add_period(
object_=sample_period, manifest_id=manifest_resource_response.resource.id)
self.assertIsNotNone(period_resource_response)
self.assertIsNotNone(period_resource_response.resource)
self.assertIsNotNone(period_resource_response.resource.id)
self._compare_periods(sample_period, period_resource_response.resource)
sample_adaptationset = self._get_sample_subtitle_adaptationset()
adaptationset_resource_response = self.bitmovin.manifests.DASH.add_subtitle_adaptation_set(
object_=sample_adaptationset, manifest_id=manifest_resource_response.resource.id,
period_id=period_resource_response.resource.id
)
self.assertIsNotNone(adaptationset_resource_response)
self.assertIsNotNone(adaptationset_resource_response.resource)
self.assertIsNotNone(adaptationset_resource_response.resource.id)
self._compare_subtitle_adaptationsets(sample_adaptationset, adaptationset_resource_response.resource)
sample_representation = self._get_sample_vtt_representation()
representation_resource_response = self.bitmovin.manifests.DASH.add_vtt_representation(
object_=sample_representation, manifest_id=manifest_resource_response.resource.id,
period_id=period_resource_response.resource.id, adaptationset_id=adaptationset_resource_response.resource.id
)
self.assertIsNotNone(representation_resource_response)
self.assertIsNotNone(representation_resource_response.resource)
self.assertIsNotNone(representation_resource_response.resource.id)
self._compare_vtt_representations(sample_representation, representation_resource_response.resource)
def _compare_manifests(self, first: DashManifest, second: DashManifest):
self.assertEqual(first.manifestName, second.manifestName)
self.assertEqual(first.description, second.description)
self.assertEqual(len(first.outputs), len(second.outputs))
self.assertEqual(first.name, second.name)
self.assertEqual(first.description, second.description)
return True
def _compare_periods(self, first: Period, second: Period):
self.assertEqual(first.start, second.start)
self.assertEqual(first.duration, second.duration)
return True
def _compare_adaptationsets(self, first: AbstractAdaptationSet, second: AbstractAdaptationSet):
return True
def _compare_video_adaptationsets(self, first: VideoAdaptationSet, second: VideoAdaptationSet):
self._compare_adaptationsets(first, second)
return True
def _compare_subtitle_adaptationsets(self, first: SubtitleAdaptationSet, second: SubtitleAdaptationSet):
self._compare_adaptationsets(first, second)
return True
def _compare_fmp4_representations(self, first: AbstractFMP4Representation, second: AbstractFMP4Representation):
self.assertEqual(first.type, second.type)
self.assertEqual(first.encodingId, second.encodingId)
self.assertEqual(first.muxingId, second.muxingId)
self.assertEqual(first.segmentPath, second.segmentPath)
self.assertEqual(first.startSegmentNumber, second.startSegmentNumber)
self.assertEqual(first.endSegmentNumber, second.endSegmentNumber)
self.assertEqual(first.startKeyframeId, second.startKeyframeId)
self.assertEqual(first.endKeyframeId, second.endKeyframeId)
return True
def _compare_drm_fmp4_representations(self, first: DRMFMP4Representation, second: DRMFMP4Representation):
self._compare_fmp4_representations(first, second)
self.assertEqual(first.drmId, second.drmId)
return True
def _compare_webm_representations(self, first: WebMRepresentation, second: WebMRepresentation):
self.assertEqual(first.type, second.type)
self.assertEqual(first.encodingId, second.encodingId)
self.assertEqual(first.muxingId, second.muxingId)
self.assertEqual(first.segmentPath, second.segmentPath)
self.assertEqual(first.startSegmentNumber, second.startSegmentNumber)
return True
def _compare_vtt_representations(self, first: VttRepresentation, second: VttRepresentation):
self.assertEqual(first.vttUrl, second.vttUrl)
return True
def _compare_mp4_representations(self, first: DashMP4Representation, second: DashMP4Representation):
self.assertEqual(first.encodingId, second.encodingId)
self.assertEqual(first.muxingId, second.muxingId)
self.assertEqual(first.filePath, second.filePath)
return True
def _compare_encodings(self, first: Encoding, second: Encoding):
self.assertEqual(first.name, second.name)
self.assertEqual(first.description, second.description)
self.assertEqual(first.encoderVersion, second.encoderVersion)
self.assertEqual(first.cloudRegion, second.cloudRegion)
return True
def _compare_muxings(self, first: FMP4Muxing, second: FMP4Muxing):
self.assertEqual(first.initSegmentName, second.initSegmentName)
self.assertEqual(len(first.outputs), len(second.outputs))
self.assertEqual(first.segmentLength, second.segmentLength)
self.assertEqual(first.segmentNaming, second.segmentNaming)
self.assertEqual(first.name, second.name)
        self.assertEqual(first.description, second.description)
return True
def _compare_drms(self, first: MarlinDRM, second: MarlinDRM):
self.assertEqual(first.kid, second.kid)
self.assertEqual(first.key, second.key)
self.assertEqual(len(first.outputs), len(second.outputs))
self.assertEqual(first.name, second.name)
self.assertEqual(first.description, second.description)
return True
def _get_sample_manifest(self):
encoding_output = self._get_sample_encoding_output()
manifest = DashManifest(manifest_name='bitmovin-python_Sample_DASH_Manifest.mpd', outputs=[encoding_output],
name='Sample DASH Manifest')
self.assertIsNotNone(manifest)
self.assertIsNotNone(manifest.manifestName)
self.assertIsNotNone(manifest.outputs)
return manifest
def _get_sample_encoding_output(self):
acl_entry = ACLEntry(scope='string', permission=ACLPermission.PUBLIC_READ)
sample_output = self.utils.get_sample_s3_output()
s3_output = self.bitmovin.outputs.S3.create(sample_output)
encoding_output = EncodingOutput(output_id=s3_output.resource.id,
output_path='/bitmovin-python/StreamTests/' + str(uuid.uuid4()),
acl=[acl_entry])
return encoding_output
def _get_sample_period_default(self):
period = Period()
return period
def _get_special_period(self):
period = self._get_sample_period_default()
period.start = 1.33
period.duration = 67.3
return period
def _get_sample_adaptationset(self):
video_adaptationset = VideoAdaptationSet()
return video_adaptationset
def _get_sample_subtitle_adaptationset(self):
subtitle_adaptationset = SubtitleAdaptationSet(lang='eng')
return subtitle_adaptationset
def _get_sample_fmp4_representation(self):
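        # Hedged background note: the TEMPLATE representation type corresponds to
        # SegmentTemplate-style addressing in the generated MPD, as opposed to an
        # explicit segment list or timeline.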
encoding_id = self.sampleEncoding.id
muxing_id = self.sampleMuxing.id
fmp4_representation = FMP4Representation(type=FMP4RepresentationType.TEMPLATE,
encoding_id=encoding_id,
muxing_id=muxing_id,
segment_path='/path/to/segments/',
start_segment_number=1,
end_segment_number=2)
return fmp4_representation
def _get_sample_fmp4_representation_with_keyframes(self):
encoding_id = self.sampleEncoding.id
muxing_id = self.sampleMuxing.id
fmp4_representation = FMP4Representation(type=FMP4RepresentationType.TEMPLATE,
encoding_id=encoding_id,
muxing_id=muxing_id,
segment_path='/path/to/segments/',
start_keyframe_id='345678987654345678',
end_keyframe_id='3453453454')
return fmp4_representation
def _get_sample_drm_fmp4_representation(self):
encoding_id = self.sampleEncoding.id
muxing_id = self.sampleMuxing.id
drm_id = self.sampleDrm.id
fmp4_representation = DRMFMP4Representation(type=FMP4RepresentationType.TEMPLATE,
encoding_id=encoding_id,
muxing_id=muxing_id,
segment_path='/path/to/segments/',
start_segment_number=1,
end_segment_number=2,
drm_id=drm_id)
return fmp4_representation
def _get_sample_webm_representation(self):
encoding_id = self.sampleEncoding.id
muxing_id = self.sampleMuxing.id
webm_representation = WebMRepresentation(type=WebMRepresentationType.TEMPLATE,
encoding_id=encoding_id,
muxing_id=muxing_id,
segment_path='/path/to/segments/',
start_segment_number=1)
return webm_representation
def _get_sample_mp4_representation(self):
encoding_id = self.sampleEncoding.id
muxing_id = self.sampleMuxing.id
mp4_representation = DashMP4Representation(encoding_id=encoding_id,
muxing_id=muxing_id,
file_path='/path/to/file.mp4')
return mp4_representation
def _get_sample_vtt_representation(self):
        vtt_representation = VttRepresentation(vtt_url='https://yourhost.com/path/mysubtitles.vtt')
        return vtt_representation
def _get_sample_muxing(self):
stream = self._get_sample_stream()
create_stream_response = self.bitmovin.encodings.Stream.create(object_=stream,
encoding_id=self.sampleEncoding.id)
self.assertIsNotNone(create_stream_response)
self.assertIsNotNone(create_stream_response.resource)
self.assertIsNotNone(create_stream_response.resource.id)
muxing_stream = MuxingStream(stream_id=create_stream_response.resource.id)
muxing = FMP4Muxing(streams=[muxing_stream],
segment_length=4,
segment_naming='seg_%number%.m4s',
init_segment_name='init.mp4',
outputs=stream.outputs,
name='Sample FMP4 Muxing')
return muxing
def _get_sample_stream(self):
sample_codec_configuration = self.utils.get_sample_h264_codec_configuration()
h264_codec_configuration = self.bitmovin.codecConfigurations.H264.create(sample_codec_configuration)
(sample_input, sample_files) = self.utils.get_sample_s3_input()
s3_input = self.bitmovin.inputs.S3.create(sample_input)
stream_input = StreamInput(input_id=s3_input.resource.id, input_path=sample_files.get('854b9c98-17b9-49ed-b75c-3b912730bfd1'), selection_mode='AUTO')
acl_entry = ACLEntry(scope='string', permission=ACLPermission.PUBLIC_READ)
sample_output = self.utils.get_sample_s3_output()
s3_output = self.bitmovin.outputs.S3.create(sample_output)
encoding_output = EncodingOutput(output_id=s3_output.resource.id,
output_path='/bitmovin-python/StreamTests/'+str(uuid.uuid4()),
acl=[acl_entry])
stream = Stream(codec_configuration_id=h264_codec_configuration.resource.id,
input_streams=[stream_input],
outputs=[encoding_output],
name='Sample Stream')
self.assertIsNotNone(stream.codecConfigId)
self.assertIsNotNone(stream.inputStreams)
self.assertIsNotNone(stream.outputs)
return stream
def _create_sample_encoding(self):
sample_encoding = self.utils.get_sample_encoding()
encoding_resource_response = self.bitmovin.encodings.Encoding.create(sample_encoding)
self.assertIsNotNone(encoding_resource_response)
self.assertIsNotNone(encoding_resource_response.resource)
self.assertIsNotNone(encoding_resource_response.resource.id)
self._compare_encodings(sample_encoding, encoding_resource_response.resource)
return encoding_resource_response.resource
def _create_sample_muxing(self):
sample_muxing = self._get_sample_muxing()
muxing_resource_response = self.bitmovin.encodings.Muxing.FMP4.create(object_=sample_muxing,
encoding_id=self.sampleEncoding.id)
self.assertIsNotNone(muxing_resource_response)
self.assertIsNotNone(muxing_resource_response.resource)
self.assertIsNotNone(muxing_resource_response.resource.id)
self._compare_muxings(sample_muxing, muxing_resource_response.resource)
return muxing_resource_response.resource
def _create_sample_drm(self):
sample_drm = self._get_sample_drm_marlin()
drm_resource_response = self.bitmovin.encodings.Muxing.FMP4.DRM.Marlin.create(
object_=sample_drm, encoding_id=self.sampleEncoding.id, muxing_id=self.sampleMuxing.id)
self.assertIsNotNone(drm_resource_response)
self.assertIsNotNone(drm_resource_response.resource)
self.assertIsNotNone(drm_resource_response.resource.id)
self._compare_drms(sample_drm, drm_resource_response.resource)
return drm_resource_response.resource
def _get_sample_drm_marlin(self):
sample_output = self._get_sample_encoding_output()
sample_output.outputPath += '/drm'
marlin_drm_settings = self.settings.get('sampleObjects').get('drmConfigurations').get('Marlin')
drm = MarlinDRM(key=marlin_drm_settings[0].get('key'),
kid=marlin_drm_settings[0].get('kid'),
outputs=[sample_output],
name='Sample Marlin DRM')
return drm
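# Note (added for readability, not part of the original test): the helpers above
# build on each other roughly as encoding -> stream -> FMP4 muxing -> Marlin DRM,
# and the representation helpers reference the resulting self.sampleEncoding.id,
# self.sampleMuxing.id and self.sampleDrm.id values created from that chain.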
if __name__ == '__main__':
unittest.main()
|
unlicense
|
22ad9857f52f5b6d86526f80226292fa
| 55.678
| 157
| 0.690427
| 4.204599
| false
| false
| false
| false
|
openaddresses/machine
|
test.py
|
1
|
2037
|
# coding=utf8
"""
Run Python test suite via the standard unittest mechanism.
Usage:
python test.py
python test.py --logall
python test.py TestConformTransforms
python test.py -l TestOA.test_process
All logging is suppressed unless --logall or -l specified
~/.openaddr-logging-test.json can also be used to configure log behavior
"""
import unittest
import sys, os
import logging
if 'DATABASE_URL' not in os.environ:
# Default to the testing DB if no DATABASE_URL env var is found.
os.environ['DATABASE_URL'] = 'postgres://openaddr:openaddr@localhost/openaddr'
from openaddr import jobs
from openaddr.tests import TestOA, TestState, TestPackage
from openaddr.tests.sample import TestSample
from openaddr.tests.cache import TestCacheExtensionGuessing, TestCacheEsriDownload
from openaddr.tests.conform import TestConformCli, TestConformTransforms, TestConformMisc, TestConformCsv, TestConformLicense, TestConformTests
from openaddr.tests.render import TestRender
from openaddr.tests.dotmap import TestDotmap
from openaddr.tests.preview import TestPreview
from openaddr.tests.slippymap import TestSlippyMap
from openaddr.tests.util import TestUtilities
from openaddr.tests.summarize import TestSummarizeFunctions
from openaddr.tests.parcels import TestParcelsUtils, TestParcelsParse
from openaddr.tests.dashboard_stats import TestDashboardStats
from openaddr.tests.coverage import TestCalculate
from openaddr.tests.ci import (
TestHook, TestRuns, TestWorker, TestBatch, TestObjects, TestCollect,
TestAPI, TestQueue, TestAuth, TestTileIndex, TestLogging
)
if __name__ == '__main__':
# Allow the user to turn on logging with -l or --logall
# unittest.main() has its own command line so we slide this in first
level = logging.CRITICAL
for i, arg in enumerate(sys.argv[1:]):
if arg == "-l" or arg == "--logall":
level = logging.DEBUG
del sys.argv[i]
jobs.setup_logger(log_level = level, log_config_file = "~/.openaddr-logging-test.json")
unittest.main()
|
isc
|
9b6fe92e3eb7f361967cb4f1c01db149
| 38.941176
| 143
| 0.769269
| 3.690217
| false
| true
| false
| false
|
mitre/multiscanner
|
multiscanner/storage/basic_elasticsearch_storage.py
|
2
|
4795
|
"""
Storage module that will interact with elasticsearch in a simple way.
"""
from uuid import uuid4
from elasticsearch import Elasticsearch
from multiscanner.storage import storage
class BasicElasticSearchStorage(storage.Storage):
DEFAULTCONF = {
'ENABLED': False,
'host': 'localhost',
'port': 9200,
'index': 'multiscanner_reports',
'doc_type': 'reports',
}
def setup(self):
self.host = self.config['host']
self.port = self.config['port']
self.index = self.config['index']
self.doc_type = self.config['doc_type']
self.es = Elasticsearch(
host=self.host,
port=self.port
)
self.warned_changed = False
self.warned_renamed = False
return True
def store(self, report):
report_id_list = []
report_list = []
for filename in report:
report[filename]['filename'] = filename
try:
report_id = report[filename]['SHA256']
except KeyError:
report_id = uuid4()
report_id_list.append(report_id)
report_data = self.dedot(report[filename])
report_data = self.same_type_lists(report_data)
report_list.append(
{
'_index': self.index,
'_type': self.doc_type,
'_id': report_id,
'_source': report_data
}
)
# result = helpers.bulk(self.es, report_list)
return report_id_list
def teardown(self):
pass
def dedot(self, dictionary):
"""
Replaces dots in dictionary keys with underscores to keep Elasticsearch happy
:param dictionary: The dictionary object
:return: Dictionary
"""
for key in dictionary.keys():
if isinstance(dictionary[key], dict):
dictionary[key] = self.dedot(dictionary[key])
if '.' in key:
new_key = key.replace('.', '_')
dictionary[new_key] = dictionary[key]
del dictionary[key]
if not self.warned_renamed:
print('WARNING: Some keys had a . in their name which was replaced with a _')
self.warned_renamed = True
return dictionary
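# Illustrative example (hypothetical input, not from the original module):
#   {'pe.sections': {'name.text': 1}}  ->  {'pe_sections': {'name_text': 1}}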
def same_type_lists(self, dictionary):
"""
Make sure all lists in a dictionary have elements that are the same type. Otherwise it converts
them to strings. This does not include list and dict elements.
:param dictionary: The dictionary object
:return: Dictionary
"""
for key in dictionary:
if isinstance(dictionary[key], list) and dictionary[key]:
dictionary[key] = self.normalize_list(dictionary[key])
if isinstance(dictionary[key], dict):
dictionary[key] = self.same_type_lists(dictionary[key])
return dictionary
def check_same_types(self, array):
"""
Check whether all elements are of the same type. List and dict elements are not considered.
:param array: The list to check
:return: True or False
"""
if not array:
return True
t = type(array[0])
for item in array:
if not isinstance(item, list) and not isinstance(item, dict) and type(item) != t:
return False
return True
def normalize_list(self, array):
"""
Make sure all elements are of the same type. Otherwise it converts them to strings. This does not include list
and dict elements.
:param array: The list to check
:return: List
"""
# If we have a list of lists we recurse
if isinstance(array[0], list):
for i in range(0, len(array)):
array[i] = self.normalize_list(array[i])
# If we have a list of dicts look into them
elif isinstance(array[0], dict):
for i in range(0, len(array)):
array[i] = self.same_type_lists(array[i])
elif not self.check_same_types(array):
for i in range(0, len(array)):
if isinstance(array[i], list):
array[i] = self.normalize_list(array[i])
elif isinstance(array[i], dict):
array[i] = self.same_type_lists(array[i])
else:
array[i] = str(array[i])
if not self.warned_changed:
print("WARNING: We changed some of the data types so that Elasticsearch wouldn't get angry")
self.warned_changed = True
return array
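# Illustrative example (hypothetical input): [1, 'a', [2, 'b']] becomes
# ['1', 'a', ['2', 'b']]; mixed scalars are stringified, nested lists are
# normalized recursively, and dicts inside lists are passed back through
# same_type_lists().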
|
mpl-2.0
|
022f8646af5e6b4f21a7afebd959ff58
| 35.052632
| 120
| 0.542023
| 4.502347
| false
| false
| false
| false
|
mitre/multiscanner
|
multiscanner/modules/Metadata/ExifToolsScan.py
|
2
|
3427
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import division, absolute_import, with_statement, print_function, unicode_literals
import os
import subprocess
import re
from multiscanner.config import CONFIG
from multiscanner.common.utils import list2cmdline, sshexec, SSH
subprocess.list2cmdline = list2cmdline
__author__ = "Drew Bonasera"
__license__ = "MPL 2.0"
TYPE = "Metadata"
NAME = "ExifTool"
# These are overwritten by the config file
HOST = ("MultiScanner", 22, "User")
KEY = os.path.join(os.path.split(CONFIG)[0], "etc", "id_rsa")
PATHREPLACE = "X:\\"
# Entries to be removed from the final results
REMOVEENTRY = ["ExifTool Version Number", "File Name", "Directory", "File Modification Date/Time",
"File Creation Date/Time", "File Access Date/Time", "File Permissions"]
DEFAULTCONF = {
"cmdline": ["-t"],
"path": "C:\\exiftool.exe",
"key": KEY,
"host": HOST,
"replacement path": PATHREPLACE,
"remove-entry": REMOVEENTRY,
"ENABLED": True
}
def check(conf=DEFAULTCONF):
if not conf['ENABLED']:
return False
if os.path.isfile(conf["path"]):
if 'replacement path' in conf:
del conf['replacement path']
return True
if SSH:
return True
else:
return False
def scan(filelist, conf=DEFAULTCONF):
if os.path.isfile(conf["path"]):
local = True
else:
local = False
cmdline = conf["cmdline"]
results = []
cmd = cmdline
for item in filelist:
cmd.append('"' + item + '" ')
cmd.insert(0, conf["path"])
host, port, user = conf["host"]
if local:
try:
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
output = e.output
else:
try:
output = sshexec(host, list2cmdline(cmd), port=port, username=user, key_filename=conf["key"])
except Exception as e:
# TODO: log exception
return None
output = output.decode("utf-8", errors="ignore")
output = output.replace('\r', '')
reader = output.split('\n')
data = {}
fname = filelist[0]
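# The loop below assumes ExifTool's tab-separated ("-t") output, which looks
# roughly like this when several files are scanned (illustrative only):
#   ======== C:/samples/a.exe
#   File Type<TAB>Win32 EXE
#   MIME Type<TAB>application/octet-stream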
for row in reader:
row = row.split('\t')
try:
if row[0].startswith('======== '):
if data:
results.append((fname, data))
data = {}
fname = row[0][9:]
if re.match('[A-Za-z]:/', fname):
# ExifTool emits forward slashes in Windows paths; convert them back to backslashes
fname = fname.replace('/', '\\')
continue
except Exception as e:
# TODO: log exception
pass
try:
if row[0] not in conf['remove-entry']:
data[row[0]] = row[1]
except Exception as e:
# TODO: log exception
continue
if data:
results.append((fname, data))
# Gather metadata
metadata = {}
output = output.replace('\r', '')
reader = output.split('\n')
for row in reader:
row = row.split('\t')
if row and row[0] == "ExifTool Version Number":
metadata["Program version"] = row[1]
break
metadata["Name"] = NAME
metadata["Type"] = TYPE
return (results, metadata)
|
mpl-2.0
|
5a6441283d3fcf5f25217722aaaabc3c
| 28.042373
| 105
| 0.569303
| 3.903189
| false
| false
| false
| false
|
mitre/multiscanner
|
multiscanner/modules/Detonation/FireeyeScan.py
|
2
|
2339
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import division, absolute_import, with_statement, print_function, unicode_literals
import os
import time
import shutil
__author__ = "Drew Bonasera"
__license__ = "MPL 2.0"
TYPE = "Detonation"
NAME = "FireEye"
DEFAULTCONF = {
"fireeye images": ["win7-sp1", "win7x64-sp1", "winxp-sp2", "winxp-sp3"],
"ENABLED": False,
"good path": "good",
"base path": "/mnt/fireeyeshare/",
"bad path": "bad",
"src path": "src"
}
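# Directory layout implied by the defaults above and the scan() logic below
# (assumed, based on this module only): files are copied into
# <base path>/<image>/<src path>/ and the appliance is expected to move them to
# .../<good path>/ (clean) or .../<bad path>/ (flagged), which scan() then polls.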
def check(conf=DEFAULTCONF):
return conf["ENABLED"]
def scan(filelist, conf=DEFAULTCONF):
base = conf["base path"]
FEGood = conf["good path"]
FEBad = conf["bad path"]
FESrc = conf["src path"]
FireEyeImages = conf["fireeye images"]
results = {}
resultlist = []
waitlist = []
# Check that each image directory exists; if it does not, remove it from the list
for imgPath in FireEyeImages[:]:
if not (os.path.isdir(os.path.join(base, imgPath))):
print("WARNING: Fireeye path not found -", (os.path.join(base, imgPath)))
FireEyeImages.remove(imgPath)
timestamp = str(time.time()).replace('.', '-')
for fname in filelist:
filename = timestamp + "-" + os.path.basename(fname)
for img in FireEyeImages:
shutil.copyfile(fname, os.path.join(base, img, FESrc, filename))
waitlist.append((filename, img, fname))
results[fname] = []
while waitlist:
for filename, img, fname in waitlist[:]:
if os.path.isfile(os.path.join(base, img, FEGood, filename)):
os.remove(os.path.join(base, img, FEGood, filename))
elif os.path.isfile(os.path.join(base, img, FEBad, filename)):
results[fname].append(img)
os.remove(os.path.join(base, img, FEBad, filename))
else:
continue
waitlist.remove((filename, img, fname))
time.sleep(20)
for key, result in results.items():
if result:
result.sort()
resultlist.append((key, result))
metadata = {}
metadata["Name"] = NAME
metadata["Type"] = TYPE
return (resultlist, metadata)
|
mpl-2.0
|
37b029fc16a0d67bbf668801a8bce7d9
| 31.486111
| 98
| 0.604532
| 3.501497
| false
| false
| false
| false
|
mitre/multiscanner
|
multiscanner/web/app.py
|
2
|
3646
|
import codecs
from collections import namedtuple
import configparser
from flask import Flask, render_template, request
import os
import re
from multiscanner import CONFIG as MS_CONFIG
from multiscanner import __version__
from multiscanner.common import utils
DEFAULTCONF = {
'HOST': "localhost",
'PORT': 8000,
'API_LOC': "http://localhost:8080",
'FLOWER_LOC': "http://localhost:5555",
'KIBANA_LOC': "http://localhost:5601",
'DEBUG': False,
'METADATA_FIELDS': [
"Submitter Name",
"Submission Description",
"Submitter Email",
"Submitter Organization",
"Submitter Phone",
],
'TAGS': [
"Malware",
"Benign"
]
}
app = Flask(__name__)
# Finagle Flask to read config from .ini file instead of .py file
web_config_object = configparser.SafeConfigParser()
web_config_object.optionxform = str
web_config_file = utils.get_config_path(MS_CONFIG, 'web')
web_config_object.read(web_config_file)
if not web_config_object.has_section('web') or not os.path.isfile(web_config_file):
# Write default config
web_config_object.add_section('web')
for key in DEFAULTCONF:
web_config_object.set('web', key, str(DEFAULTCONF[key]))
conffile = codecs.open(web_config_file, 'w', 'utf-8')
web_config_object.write(conffile)
conffile.close()
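# With the defaults above, the generated section looks roughly like this
# (file location and exact formatting depend on utils.get_config_path and
# configparser, so treat this as a sketch):
#   [web]
#   HOST = localhost
#   PORT = 8000
#   API_LOC = http://localhost:8080
#   FLOWER_LOC = http://localhost:5555
#   KIBANA_LOC = http://localhost:5601
#   DEBUG = False
#   ... (plus METADATA_FIELDS and TAGS)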
web_config = utils.parse_config(web_config_object)['web']
conf_tuple = namedtuple('WebConfig', web_config.keys())(*web_config.values())
app.config.from_object(conf_tuple)
@app.context_processor
def inject_locs():
d = {
'api_loc': app.config.get('API_LOC', DEFAULTCONF['API_LOC']),
'flower_loc': app.config.get('FLOWER_LOC', DEFAULTCONF['FLOWER_LOC'])
}
return d
@app.route('/', methods=['GET'])
def index():
return render_template('index.html',
metadata_fields=app.config.get('METADATA_FIELDS', {}),
tags=app.config.get('TAGS', []))
@app.route('/analyses', methods=['GET', 'POST'])
def tasks():
if request.method == 'POST':
return render_template('analyses.html',
search_term=request.form['search_term'],
search_type=request.form['search_type_buttons'])
else:
return render_template('analyses.html')
@app.route('/report/<int:task_id>', methods=['GET'])
def reports(task_id=1):
term = re.escape(request.args.get('st', ''))
return render_template('report.html', task_id=task_id,
search_term=term, tags=app.config.get('TAGS', []))
@app.route('/history', methods=['GET', 'POST'])
def history():
if request.method == 'POST':
return render_template('history.html',
search_term=request.form['search_term'],
search_type=request.form['search_type_buttons'])
else:
return render_template('history.html')
@app.route('/analytics', methods=['GET'])
def analytics():
return render_template('analytics.html')
@app.route('/about', methods=['GET'])
def about():
return render_template('about.html',
version=__version__)
@app.route('/system-health', methods=['GET'])
def system_health():
return render_template('system-health.html',
kibana_loc=app.config.get('KIBANA_LOC', DEFAULTCONF['KIBANA_LOC']))
def _main():
app.run(debug=app.config.get('DEBUG', DEFAULTCONF['DEBUG']),
port=app.config.get('PORT', DEFAULTCONF['PORT']),
host=app.config.get('HOST', DEFAULTCONF['HOST']))
if __name__ == "__main__":
_main()
|
mpl-2.0
|
8cba6a9cb76b5d1c5b28ee2e95479656
| 29.383333
| 94
| 0.611355
| 3.642358
| false
| true
| false
| false
|
odlgroup/odl
|
odl/trafos/wavelet.py
|
2
|
26565
|
# Copyright 2014-2020 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Discrete wavelet transformation on L2 spaces."""
from __future__ import absolute_import, division, print_function
import numpy as np
from odl.discr import DiscretizedSpace
from odl.operator import Operator
from odl.trafos.backends.pywt_bindings import (
PYWT_AVAILABLE, precompute_raveled_slices, pywt_pad_mode, pywt_wavelet)
__all__ = ('WaveletTransform', 'WaveletTransformInverse')
_SUPPORTED_WAVELET_IMPLS = ()
if PYWT_AVAILABLE:
_SUPPORTED_WAVELET_IMPLS += ('pywt',)
import pywt
class WaveletTransformBase(Operator):
"""Base class for discrete wavelet transforms.
This abstract class is intended to share code between the forward,
inverse and adjoint wavelet transforms.
"""
def __init__(self, space, wavelet, nlevels, variant, pad_mode='constant',
pad_const=0, impl='pywt', axes=None):
"""Initialize a new instance.
Parameters
----------
space : `DiscretizedSpace`
Domain of the forward wavelet transform (the "image domain").
In the case of ``variant in ('inverse', 'adjoint')``, this
space is the range of the operator.
wavelet : string or `pywt.Wavelet`
Specification of the wavelet to be used in the transform.
If a string is given, it is converted to a `pywt.Wavelet`.
Use `pywt.wavelist` to get a list of available wavelets.
Possible wavelet families are:
``'haar'``: Haar
``'db'``: Daubechies
``'sym'``: Symlets
``'coif'``: Coiflets
``'bior'``: Biorthogonal
``'rbio'``: Reverse biorthogonal
``'dmey'``: Discrete FIR approximation of the Meyer wavelet
variant : {'forward', 'inverse', 'adjoint'}
Wavelet transform variant to be created.
nlevels : positive int, optional
Number of scaling levels to be used in the decomposition. The
maximum number of levels can be calculated with
`pywt.dwtn_max_level`.
Default: Use maximum number of levels.
pad_mode : string, optional
Method to be used to extend the signal.
``'constant'``: Fill with ``pad_const``.
``'symmetric'``: Reflect at the boundaries, not repeating the
outmost values.
``'periodic'``: Fill in values from the other side, keeping
the order.
``'order0'``: Extend constantly with the outmost values
(ensures continuity).
``'order1'``: Extend with constant slope (ensures continuity of
the first derivative). This requires at least 2 values along
each axis where padding is applied.
``'pywt_per'``: like ``'periodic'``-padding but gives the smallest
possible number of decomposition coefficients.
Only available with ``impl='pywt'``, See ``pywt.Modes.modes``.
``'reflect'``: Reflect at the boundary, without repeating the
outmost values.
``'antisymmetric'``: Anti-symmetric variant of ``symmetric``.
``'antireflect'``: Anti-symmetric variant of ``reflect``.
For reference, the following table compares the naming conventions
for the modes in ODL vs. PyWavelets::
======================= ==================
ODL PyWavelets
======================= ==================
symmetric symmetric
reflect reflect
order1 smooth
order0 constant
constant, pad_const=0 zero
periodic periodic
pywt_per periodization
antisymmetric antisymmetric
antireflect antireflect
======================= ==================
See `signal extension modes`_ for an illustration of the modes
(under the PyWavelets naming conventions).
pad_const : float, optional
Constant value to use if ``pad_mode == 'constant'``. Ignored
otherwise. Constants other than 0 are not supported by the
``pywt`` back-end.
impl : {'pywt'}, optional
Back-end for the wavelet transform.
axes : sequence of ints, optional
Axes over which the DWT that created ``coeffs`` was performed. The
default value of ``None`` corresponds to all axes. When not all
axes are included this is analogous to a batch transform in
``len(axes)`` dimensions looped over the non-transformed axes. In
other words, filtering and decimation does not occur along any
axes not in ``axes``.
References
----------
.. _signal extension modes:
https://pywavelets.readthedocs.io/en/latest/ref/signal-extension-modes.html
"""
if not isinstance(space, DiscretizedSpace):
raise TypeError(
'`space` {!r} is not a `DiscretizedSpace` instance'
''.format(space)
)
self.__impl, impl_in = str(impl).lower(), impl
if self.impl not in _SUPPORTED_WAVELET_IMPLS:
raise ValueError("`impl` '{}' not supported".format(impl_in))
if axes is None:
axes = tuple(range(space.ndim))
elif np.isscalar(axes):
axes = (axes,)
elif len(axes) > space.ndim:
raise ValueError("too many axes")
self.axes = tuple(axes)
if nlevels is None:
nlevels = pywt.dwtn_max_level(space.shape, wavelet, self.axes)
self.__nlevels, nlevels_in = int(nlevels), nlevels
if self.nlevels != nlevels_in:
raise ValueError('`nlevels` must be integer, got {}'
''.format(nlevels_in))
self.__impl, impl_in = str(impl).lower(), impl
if self.impl not in _SUPPORTED_WAVELET_IMPLS:
raise ValueError("`impl` '{}' not supported".format(impl_in))
self.__wavelet = getattr(wavelet, 'name', str(wavelet).lower())
self.__pad_mode = str(pad_mode).lower()
self.__pad_const = space.field.element(pad_const)
if self.impl == 'pywt':
self.pywt_pad_mode = pywt_pad_mode(pad_mode, pad_const)
self.pywt_wavelet = pywt_wavelet(self.wavelet)
# determine coefficient shapes (without running wavedecn)
self._coeff_shapes = pywt.wavedecn_shapes(
space.shape, wavelet, mode=self.pywt_pad_mode,
level=self.nlevels, axes=self.axes)
# precompute slices into the (raveled) coeffs
self._coeff_slices = precompute_raveled_slices(self._coeff_shapes)
coeff_size = pywt.wavedecn_size(self._coeff_shapes)
coeff_space = space.tspace_type(coeff_size, dtype=space.dtype)
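# For instance (see the doctests in WaveletTransform below), a 4x4 domain
# with nlevels=1 and wavelet='haar' gives a raveled coefficient space of
# size 16.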
else:
raise RuntimeError("bad `impl` '{}'".format(self.impl))
variant, variant_in = str(variant).lower(), variant
if variant not in ('forward', 'inverse', 'adjoint'):
raise ValueError("`variant` '{}' not understood"
"".format(variant_in))
self.__variant = variant
if variant == 'forward':
super(WaveletTransformBase, self).__init__(
domain=space, range=coeff_space, linear=True)
else:
super(WaveletTransformBase, self).__init__(
domain=coeff_space, range=space, linear=True)
@property
def impl(self):
"""Implementation back-end of this wavelet transform."""
return self.__impl
@property
def nlevels(self):
"""Number of scaling levels in this wavelet transform."""
return self.__nlevels
@property
def wavelet(self):
"""Name of the wavelet used in this wavelet transform."""
return self.__wavelet
@property
def pad_mode(self):
"""Padding mode used for extending input beyond its boundary."""
return self.__pad_mode
@property
def pad_const(self):
"""Value for extension used in ``'constant'`` padding mode."""
return self.__pad_const
@property
def is_orthogonal(self):
"""Whether or not the wavelet basis is orthogonal."""
return self.pywt_wavelet.orthogonal
@property
def is_biorthogonal(self):
"""Whether or not the wavelet basis is bi-orthogonal."""
return self.pywt_wavelet.biorthogonal
def scales(self):
"""Get the scales of each coefficient.
Returns
-------
scales : ``range`` element
The scale of each coefficient, given by an integer. 0 for the
lowest resolution and self.nlevels for the highest.
"""
if self.impl == 'pywt':
if self.__variant == 'forward':
discr_space = self.domain
wavelet_space = self.range
else:
discr_space = self.range
wavelet_space = self.domain
shapes = pywt.wavedecn_shapes(discr_space.shape, self.pywt_wavelet,
mode=self.pywt_pad_mode,
level=self.nlevels, axes=self.axes)
coeff_list = [np.full(shapes[0], 0)]
for i in range(1, 1 + len(shapes[1:])):
coeff_list.append({k: np.full(shapes[i][k], i)
for k in shapes[i].keys()})
coeffs = pywt.ravel_coeffs(coeff_list, axes=self.axes)[0]
return wavelet_space.element(coeffs)
else:
raise RuntimeError("bad `impl` '{}'".format(self.impl))
class WaveletTransform(WaveletTransformBase):
"""Discrete wavelet transform between discretized Lp spaces."""
def __init__(self, domain, wavelet, nlevels=None, pad_mode='constant',
pad_const=0, impl='pywt', axes=None):
"""Initialize a new instance.
Parameters
----------
domain : `DiscretizedSpace`
Domain of the wavelet transform (the "image domain").
wavelet : string or `pywt.Wavelet`
Specification of the wavelet to be used in the transform.
If a string is given, it is converted to a `pywt.Wavelet`.
Use `pywt.wavelist` to get a list of available wavelets.
Possible wavelet families are:
``'haar'``: Haar
``'db'``: Daubechies
``'sym'``: Symlets
``'coif'``: Coiflets
``'bior'``: Biorthogonal
``'rbio'``: Reverse biorthogonal
``'dmey'``: Discrete FIR approximation of the Meyer wavelet
nlevels : positive int, optional
Number of scaling levels to be used in the decomposition. The
maximum number of levels can be calculated with
`pywt.dwtn_max_level`.
Default: Use maximum number of levels.
pad_mode : string, optional
Method to be used to extend the signal.
``'constant'``: Fill with ``pad_const``.
``'symmetric'``: Reflect at the boundaries, not repeating the
outmost values.
``'periodic'``: Fill in values from the other side, keeping
the order.
``'order0'``: Extend constantly with the outmost values
(ensures continuity).
``'order1'``: Extend with constant slope (ensures continuity of
the first derivative). This requires at least 2 values along
each axis where padding is applied.
``'pywt_per'``: like ``'periodic'``-padding but gives the smallest
possible number of decomposition coefficients.
Only available with ``impl='pywt'``, See ``pywt.Modes.modes``.
``'reflect'``: Reflect at the boundary, without repeating the
outmost values.
``'antisymmetric'``: Anti-symmetric variant of ``symmetric``.
``'antireflect'``: Anti-symmetric variant of ``reflect``.
For reference, the following table compares the naming conventions
for the modes in ODL vs. PyWavelets::
======================= ==================
ODL PyWavelets
======================= ==================
symmetric symmetric
reflect reflect
order1 smooth
order0 constant
constant, pad_const=0 zero
periodic periodic
pywt_per periodization
antisymmetric antisymmetric
antireflect antireflect
======================= ==================
See `signal extension modes`_ for an illustration of the modes
(under the PyWavelets naming conventions).
pad_const : float, optional
Constant value to use if ``pad_mode == 'constant'``. Ignored
otherwise. Constants other than 0 are not supported by the
``pywt`` back-end.
impl : {'pywt'}, optional
Back-end for the wavelet transform.
axes : sequence of ints, optional
Axes over which the DWT that created ``coeffs`` was performed. The
default value of ``None`` corresponds to all axes. When not all
axes are included this is analogous to a batch transform in
``len(axes)`` dimensions looped over the non-transformed axes. In
other words, filtering and decimation does not occur along any
axes not in ``axes``.
Examples
--------
Compute a very simple wavelet transform in a discrete 2D space with
4 sampling points per axis:
>>> space = odl.uniform_discr([0, 0], [1, 1], (4, 4))
>>> wavelet_trafo = odl.trafos.WaveletTransform(
... domain=space, nlevels=1, wavelet='haar')
>>> wavelet_trafo.is_biorthogonal
True
>>> data = [[1, 1, 1, 1],
... [0, 0, 0, 0],
... [0, 0, 1, 1],
... [1, 0, 1, 0]]
>>> decomp = wavelet_trafo(data)
>>> decomp.shape
(16,)
It is also possible to apply the transform only along a subset of the
axes. Here, we apply a 1D wavelet transform along axis 0 for each
index along axis 1:
>>> wavelet_trafo = odl.trafos.WaveletTransform(
... domain=space, nlevels=1, wavelet='haar', axes=(0,))
>>> decomp = wavelet_trafo(data)
>>> decomp.shape
(16,)
In general, the size of the coefficients may exceed the size of the
input data when the wavelet is longer than the Haar wavelet. This is
due to extra coefficients that must be kept for perfect reconstruction.
No extra boundary coefficients are needed when the edge mode is
``"pywt_periodic"`` and the size along each transformed axis is a
multiple of ``2**nlevels``.
>>> space = odl.uniform_discr([0, 0], [1, 1], (16, 16))
>>> space.size
256
>>> wavelet_trafo = odl.trafos.WaveletTransform(
... domain=space, nlevels=2, wavelet='db2',
... pad_mode='pywt_periodic')
>>> decomp = wavelet_trafo(np.ones(space.shape))
>>> decomp.shape
(256,)
>>> wavelet_trafo = odl.trafos.WaveletTransform(
... domain=space, nlevels=2, wavelet='db2', pad_mode='symmetric')
>>> decomp = wavelet_trafo(np.ones(space.shape))
>>> decomp.shape
(387,)
References
----------
.. _signal extension modes:
https://pywavelets.readthedocs.io/en/latest/ref/signal-extension-modes.html
"""
super(WaveletTransform, self).__init__(
space=domain, wavelet=wavelet, nlevels=nlevels, variant='forward',
pad_mode=pad_mode, pad_const=pad_const, impl=impl, axes=axes)
def _call(self, x):
"""Return wavelet transform of ``x``."""
if self.impl == 'pywt':
coeffs = pywt.wavedecn(
x, wavelet=self.pywt_wavelet, level=self.nlevels,
mode=self.pywt_pad_mode, axes=self.axes)
return pywt.ravel_coeffs(coeffs, axes=self.axes)[0]
else:
raise RuntimeError("bad `impl` '{}'".format(self.impl))
@property
def adjoint(self):
"""Adjoint wavelet transform.
Returns
-------
adjoint : `WaveletTransformInverse`
If the transform is orthogonal, the adjoint is the inverse.
Raises
------
OpNotImplementedError
if `is_orthogonal` is ``False``
"""
if self.is_orthogonal:
scale = 1 / self.domain.partition.cell_volume
return scale * self.inverse
else:
# TODO: put adjoint here
return super(WaveletTransform, self).adjoint
@property
def inverse(self):
"""Inverse wavelet transform.
Returns
-------
inverse : `WaveletTransformInverse`
See Also
--------
adjoint
"""
return WaveletTransformInverse(
range=self.domain, wavelet=self.pywt_wavelet, nlevels=self.nlevels,
pad_mode=self.pad_mode, pad_const=self.pad_const, impl=self.impl,
axes=self.axes)
class WaveletTransformInverse(WaveletTransformBase):
"""Discrete inverse wavelet trafo between discrete L2 spaces.
See Also
--------
WaveletTransform
"""
def __init__(self, range, wavelet, nlevels=None, pad_mode='constant',
pad_const=0, impl='pywt', axes=None):
"""Initialize a new instance.
Parameters
----------
range : `DiscretizedSpace`
Domain of the forward wavelet transform (the "image domain"),
which is the range of this inverse transform.
wavelet : string or `pywt.Wavelet`
Specification of the wavelet to be used in the transform.
If a string is given, it is converted to a `pywt.Wavelet`.
Use `pywt.wavelist` to get a list of available wavelets.
Possible wavelet families are:
``'haar'``: Haar
``'db'``: Daubechies
``'sym'``: Symlets
``'coif'``: Coiflets
``'bior'``: Biorthogonal
``'rbio'``: Reverse biorthogonal
``'dmey'``: Discrete FIR approximation of the Meyer wavelet
nlevels : positive int, optional
Number of scaling levels to be used in the decomposition. The
maximum number of levels can be calculated with
`pywt.dwtn_max_level`.
Default: Use maximum number of levels.
pad_mode : string, optional
Method to be used to extend the signal.
``'constant'``: Fill with ``pad_const``.
``'symmetric'``: Reflect at the boundaries, not repeating the
outmost values.
``'periodic'``: Fill in values from the other side, keeping
the order.
``'order0'``: Extend constantly with the outmost values
(ensures continuity).
``'order1'``: Extend with constant slope (ensures continuity of
the first derivative). This requires at least 2 values along
each axis where padding is applied.
``'pywt_per'``: like ``'periodic'``-padding but gives the smallest
possible number of decomposition coefficients.
Only available with ``impl='pywt'``, See ``pywt.Modes.modes``.
``'reflect'``: Reflect at the boundary, without repeating the
outmost values.
``'antisymmetric'``: Anti-symmetric variant of ``symmetric``.
``'antireflect'``: Anti-symmetric variant of ``reflect``.
For reference, the following table compares the naming conventions
for the modes in ODL vs. PyWavelets::
======================= ==================
ODL PyWavelets
======================= ==================
symmetric symmetric
reflect reflect
order1 smooth
order0 constant
constant, pad_const=0 zero
periodic periodic
pywt_per periodization
antisymmetric antisymmetric
antireflect antireflect
======================= ==================
See `signal extension modes`_ for an illustration of the modes
(under the PyWavelets naming conventions).
pad_const : float, optional
Constant value to use if ``pad_mode == 'constant'``. Ignored
otherwise. Constants other than 0 are not supported by the
``pywt`` back-end.
impl : {'pywt'}, optional
Back-end for the wavelet transform.
axes : sequence of ints, optional
Axes over which the DWT that created ``coeffs`` was performed. The
default value of ``None`` corresponds to all axes. When not all
axes are included this is analogous to a batch transform in
``len(axes)`` dimensions looped over the non-transformed axes. In
other words, filtering and decimation does not occur along any
axes not in ``axes``.
Examples
--------
Check that the inverse is the actual inverse on a simple example on
a discrete 2D space with 4 sampling points per axis:
>>> space = odl.uniform_discr([0, 0], [1, 1], (4, 4))
>>> wavelet_trafo = odl.trafos.WaveletTransform(
... domain=space, nlevels=1, wavelet='haar')
>>> orig_array = np.array([[1, 1, 1, 1],
... [0, 0, 0, 0],
... [0, 0, 1, 1],
... [1, 0, 1, 0]])
>>> decomp = wavelet_trafo(orig_array)
>>> recon = wavelet_trafo.inverse(decomp)
>>> np.allclose(recon, orig_array)
True
References
----------
.. _signal extension modes:
https://pywavelets.readthedocs.io/en/latest/ref/signal-extension-modes.html
"""
super(WaveletTransformInverse, self).__init__(
space=range, wavelet=wavelet, variant='inverse', nlevels=nlevels,
pad_mode=pad_mode, pad_const=pad_const, impl=impl, axes=axes)
def _call(self, coeffs):
"""Return the inverse wavelet transform of ``coeffs``."""
if self.impl == 'pywt':
coeffs = pywt.unravel_coeffs(coeffs,
coeff_slices=self._coeff_slices,
coeff_shapes=self._coeff_shapes,
output_format='wavedecn')
recon = pywt.waverecn(
coeffs, wavelet=self.pywt_wavelet, mode=self.pywt_pad_mode,
axes=self.axes)
recon_shape = self.range.shape
if recon.shape != recon_shape:
# If the original shape was odd along any transformed axes it
# will have been rounded up to the next even size after the
# reconstruction. The extra sample should be discarded.
# The underlying reason is decimation by two in reconstruction
# must keep ceil(N/2) samples in each band for perfect
# reconstruction. Reconstruction then upsamples by two.
# When N is odd, (2 * np.ceil(N/2)) != N.
recon_slc = []
for i, (n_recon, n_intended) in enumerate(zip(recon.shape,
recon_shape)):
if n_recon == n_intended + 1:
# Upsampling added one entry too much in this axis,
# drop last one
recon_slc.append(slice(-1))
elif n_recon == n_intended:
recon_slc.append(slice(None))
else:
raise ValueError(
'in axis {}: expected size {} or {} in '
'`recon_shape`, got {}'
''.format(i, n_recon - 1, n_recon,
n_intended))
recon = recon[tuple(recon_slc)]
return recon
else:
raise RuntimeError("bad `impl` '{}'".format(self.impl))
@property
def adjoint(self):
"""Adjoint of this operator.
Returns
-------
adjoint : `WaveletTransform`
If the transform is orthogonal, the adjoint is the inverse.
Raises
------
OpNotImplementedError
if `is_orthogonal` is ``False``
See Also
--------
inverse
"""
if self.is_orthogonal:
scale = self.range.partition.cell_volume
return scale * self.inverse
else:
# TODO: put adjoint here
return super(WaveletTransformInverse, self).adjoint
@property
def inverse(self):
"""Inverse of this operator.
Returns
-------
inverse : `WaveletTransform`
See Also
--------
adjoint
"""
return WaveletTransform(
domain=self.range, wavelet=self.pywt_wavelet, nlevels=self.nlevels,
pad_mode=self.pad_mode, pad_const=self.pad_const, impl=self.impl,
axes=self.axes)
if __name__ == '__main__':
from odl.util.testutils import run_doctests
run_doctests(skip_if=not PYWT_AVAILABLE)
|
mpl-2.0
|
595906d1611d43eaffcac2e4bea984be
| 37.444284
| 86
| 0.540599
| 4.509421
| false
| false
| false
| false
|
odlgroup/odl
|
doc/source/getting_started/code/getting_started_convolution.py
|
2
|
3383
|
"""Source code for the getting started example."""
import odl
import scipy.signal
class Convolution(odl.Operator):
"""Operator calculating the convolution of a kernel with a function.
The operator inherits from ``odl.Operator`` to be able to be used with ODL.
"""
def __init__(self, kernel):
"""Initialize a convolution operator with a known kernel."""
# Store the kernel
self.kernel = kernel
# Initialize the Operator class by calling its __init__ method.
# This sets properties such as domain and range and allows the other
# operator convenience functions to work.
super(Convolution, self).__init__(
domain=kernel.space, range=kernel.space, linear=True)
def _call(self, x):
"""Implement calling the operator by calling scipy."""
return scipy.signal.fftconvolve(self.kernel, x, mode='same')
@property # making adjoint a property lets users access it as A.adjoint
def adjoint(self):
return self # the adjoint is the same as this operator
# Define the space the problem should be solved on.
# Here the square [-1, 1] x [-1, 1] discretized on a 100x100 grid.
space = odl.uniform_discr([-1, -1], [1, 1], [100, 100])
# Convolution kernel, a small centered rectangle.
kernel = odl.phantom.cuboid(space, [-0.05, -0.05], [0.05, 0.05])
# Create convolution operator
A = Convolution(kernel)
# Create phantom (the "unknown" solution)
phantom = odl.phantom.shepp_logan(space, modified=True)
# Apply convolution to phantom to create data
g = A(phantom)
# Display the results using the show method
kernel.show('kernel')
phantom.show('phantom')
g.show('convolved phantom')
# Landweber
# Need operator norm for step length (omega)
opnorm = odl.power_method_opnorm(A)
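# Added note: Landweber iteration converges for step sizes 0 < omega < 2 / ||A||^2,
# so the omega = 1 / opnorm**2 used below sits safely inside that range.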
f = space.zero()
odl.solvers.landweber(A, f, g, niter=100, omega=1 / opnorm ** 2)
f.show('landweber')
# Conjugate gradient
f = space.zero()
odl.solvers.conjugate_gradient_normal(A, f, g, niter=100)
f.show('conjugate gradient')
# Tikhonov with identity
B = odl.IdentityOperator(space)
a = 0.1
T = A.adjoint * A + a * B.adjoint * B
b = A.adjoint(g)
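# Added note: this solves the normal equations of the Tikhonov functional
#   min_f ||A f - g||^2 + a ||B f||^2   <=>   (A^* A + a B^* B) f = A^* g,
# which is exactly the symmetric positive (semi-)definite system T f = b above.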
f = space.zero()
odl.solvers.conjugate_gradient(T, f, b, niter=100)
f.show('Tikhonov identity conjugate gradient')
# Tikhonov with gradient
B = odl.Gradient(space)
a = 0.0001
T = A.adjoint * A + a * B.adjoint * B
b = A.adjoint(g)
f = space.zero()
odl.solvers.conjugate_gradient(T, f, b, niter=100)
f.show('Tikhonov gradient conjugate gradient')
# Douglas-Rachford
# Assemble all operators into a list.
grad = odl.Gradient(space)
lin_ops = [A, grad]
a = 0.001
# Create functionals for the l2 distance and l1 norm.
g_funcs = [odl.solvers.L2NormSquared(space).translated(g),
a * odl.solvers.L1Norm(grad.range)]
# Functional of the bound constraint 0 <= f <= 1
f = odl.solvers.IndicatorBox(space, 0, 1)
# Find scaling constants so that the solver converges.
# See the douglas_rachford_pd documentation for more information.
opnorm_A = odl.power_method_opnorm(A, xstart=g)
opnorm_grad = odl.power_method_opnorm(grad, xstart=g)
sigma = [1 / opnorm_A**2, 1 / opnorm_grad**2]
tau = 1.0
# Solve using the Douglas-Rachford Primal-Dual method
x = space.zero()
odl.solvers.douglas_rachford_pd(x, f, g_funcs, lin_ops,
tau=tau, sigma=sigma, niter=100)
x.show('TV Douglas-Rachford', force_show=True)
|
mpl-2.0
|
80f34bd0de4ca757ed2eb0bfc087880a
| 27.91453
| 79
| 0.687851
| 3.045005
| false
| false
| false
| false
|
odlgroup/odl
|
examples/solvers/pdhg_deconvolve.py
|
2
|
2847
|
"""Total variation deconvolution using PDHG.
Solves the optimization problem
min_x 1/2 ||A(x) - g||_2^2 + lam || |grad(x)| ||_1
Where ``A`` is a convolution operator, ``grad`` the spatial gradient and ``g``
is given noisy data.
For further details and a description of the solution method used, see
https://odlgroup.github.io/odl/guide/pdhg_guide.html in the ODL documentation.
"""
import numpy as np
import odl
# Discretization parameters
n = 128
# Discretized spaces
space = odl.uniform_discr([0, 0], [n, n], [n, n])
# Initialize convolution operator by Fourier formula
# conv(f, g) = F^{-1}[F[f] * F[g]]
# Where F[.] is the Fourier transform and the Fourier transform of a Gaussian
# with standard deviation filter_width is another Gaussian with width
# 1 / filter_width
filter_width = 3.0 # standard deviation of the Gaussian filter
ft = odl.trafos.FourierTransform(space)
c = filter_width ** 2 / 4.0 ** 2
gaussian = ft.range.element(lambda x: np.exp(-(x[0] ** 2 + x[1] ** 2) * c))
convolution = ft.inverse * gaussian * ft
# Optional: Run diagnostics to assure the adjoint is properly implemented
# odl.diagnostics.OperatorTest(conv_op).run_tests()
# Create phantom
phantom = odl.phantom.shepp_logan(space, modified=True)
# Create the convolved version of the phantom
data = convolution(phantom)
data += odl.phantom.white_noise(convolution.range) * np.mean(data) * 0.1
data.show('Convolved Data')
# Set up PDHG:
# Initialize gradient operator
gradient = odl.Gradient(space, method='forward')
# Column vector of two operators
op = odl.BroadcastOperator(convolution, gradient)
# Create the functional for unconstrained primal variable
f = odl.solvers.ZeroFunctional(op.domain)
# l2-squared data matching
l2_norm_squared = odl.solvers.L2NormSquared(space).translated(data)
# Isotropic TV-regularization i.e. the l1-norm
l1_norm = 0.01 * odl.solvers.L1Norm(gradient.range)
# Make separable sum of functionals, order must be the same as in `op`
g = odl.solvers.SeparableSum(l2_norm_squared, l1_norm)
# --- Select solver parameters and solve using PDHG --- #
# Estimated operator norm, add 10 percent to ensure ||K||_2^2 * sigma * tau < 1
op_norm = 1.1 * odl.power_method_opnorm(op)
niter = 300 # Number of iterations
tau = 10.0 / op_norm # Step size for the primal variable
sigma = 0.1 / op_norm # Step size for the dual variables
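# Added note: with these choices sigma * tau * ||K||^2 is roughly 1 / 1.1**2 < 1
# (op_norm overestimates ||K|| by 10 percent), so the condition above holds.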
# Optionally pass callback to the solver to display intermediate results
callback = (odl.solvers.CallbackPrintIteration() &
odl.solvers.CallbackShow(step=20))
# Choose a starting point
x = op.domain.zero()
# Run the algorithm
odl.solvers.pdhg(x, f, g, op, niter=niter, tau=tau, sigma=sigma,
callback=callback)
# Display images
phantom.show(title='Original Image')
data.show(title='Convolved Image')
x.show(title='Deconvolved Image', force_show=True)
|
mpl-2.0
|
6aec45a5d378af6798bdd87165484fab
| 31.352273
| 79
| 0.721812
| 3.074514
| false
| false
| false
| false
|
odlgroup/odl
|
examples/tomo/ray_trafo_helical_cone_3d.py
|
2
|
1795
|
"""Example using the ray transform with helical cone beam geometry."""
import numpy as np
import odl
# Reconstruction space: discretized functions on the cube
# [-20, 20]^2 x [0, 40] with 300 samples per dimension.
reco_space = odl.uniform_discr(
min_pt=[-20, -20, 0], max_pt=[20, 20, 40], shape=[300, 300, 300],
dtype='float32')
# Make a helical cone beam geometry with flat detector
# Angles: uniformly spaced, n = 2000, min = 0, max = 8 * 2 * pi
angle_partition = odl.uniform_partition(0, 8 * 2 * np.pi, 2000)
# Detector: uniformly sampled, n = (512, 64), min = (-50, -3), max = (50, 3)
detector_partition = odl.uniform_partition([-50, -3], [50, 3], [512, 64])
# Spiral has a pitch of 5, we run 8 rounds (due to max angle = 8 * 2 * pi)
geometry = odl.tomo.ConeBeamGeometry(
angle_partition, detector_partition, src_radius=100, det_radius=100,
pitch=5.0)
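# Sanity check on the coverage (derived from the numbers above): 8 turns with a
# pitch of 5 give 40 units of axial travel, matching the z-extent [0, 40] of the
# reconstruction volume.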
# Ray transform (= forward projection).
ray_trafo = odl.tomo.RayTransform(reco_space, geometry)
# Create a discrete Shepp-Logan phantom (modified version)
phantom = odl.phantom.shepp_logan(reco_space, modified=True)
# Create projection data by calling the ray transform on the phantom
proj_data = ray_trafo(phantom)
# Back-projection can be done by simply calling the adjoint operator on the
# projection data (or any element in the projection space).
backproj = ray_trafo.adjoint(proj_data)
# Shows a slice of the phantom, projections, and reconstruction
phantom.show(coords=[None, None, 20], title='Phantom, Middle Z Slice')
proj_data.show(coords=[2 * np.pi, None, None],
title='Projection After Exactly One Turn')
proj_data.show(coords=[None, None, 0], title='Sinogram, Middle Slice')
backproj.show(coords=[None, None, 20], title='Back-projection, Middle Z Slice',
force_show=True)
|
mpl-2.0
|
62fd3766068fb6e722482976c8828f3a
| 42.780488
| 79
| 0.702507
| 3.068376
| false
| false
| true
| false
|
odlgroup/odl
|
examples/solvers/adupdates_tomography.py
|
2
|
6365
|
r"""Total-variation regularized tomography example using the adupdates solver.
This example solves a linear inverse problem of the form :math:`Ax = y`, where
:math:`x \in \mathbb{R}^n` is the (unknown) original image to be reconstructed,
:math:`y \in \mathbb{R}^m` is the noisy data and :math:`A \in \mathbb{R}^{m
\times n}` is the measurement matrix describing the discretized physical model.
To solve this problem, we first split the measurement matrix and the data into
:math:`m` parts and solve the least squares problem
..math::
\min_{x\in\mathbb{R}^n} D(x) := \sum_{j = 1}^m \| A_j x - y_j \|^2.
To regularize, we add terms for the total variation and for a functional which
guarantees that the solution will be pointwise nonnegative. The variational
regularization problem we are solving is therefore
..math::
\min_{x\in \mathbb{R}^n_+} D(x) + \sum_{i = 1}^d (\| \partial_{i, 1} x \|_1
+ \| \partial_{i, 2} x \|_1).
Here, :math:`\partial_{i, 1}` and :math:`\partial_{i, 2}` contain the even and
odd components, respectively, of the discretized :math:`i`-th partial
derivative, and :math:`d` is the dimension of the tomography problem. In this
example, :math:`d = 2`. We solve the problem with the alternating dual updates
method. For further details, see
`[MF2015] <http://ieeexplore.ieee.org/document/7271047/>`_.
References
----------
[MF2015] McGaffin, M G, and Fessler, J A. *Alternating dual updates
algorithm for X-ray CT reconstruction on the GPU*. IEEE Transactions
on Computational Imaging, 1.3 (2015), pp 186--199.
"""
import numpy as np
import odl
# The following parameters determine how to split the data (sinograms) and if
# the solver should do the inner iterations in a fixed order or at random.
SPLIT_METHOD = 'interlaced' # How to split the data ('block' or 'interlaced')?
SPLIT_NUMBER = 20 # How many pieces of data?
RANDOM = True  # Choose the order of the inner iterations at random?
# --- Create simulated data (phantom) ---
# Reconstruction space: Set of two-dimensional quadratic images.
reco_space = odl.uniform_discr(min_pt=[-40.0, -40.0],
max_pt=[40.0, 40.0],
shape=[1024, 1024])
phantom = odl.phantom.shepp_logan(reco_space, modified=True)
# Create the forward operators. They correspond to a fully sampled parallel
# beam geometry.
geometry = odl.tomo.parallel_beam_geometry(reco_space)
if SPLIT_METHOD == 'block':
# Split the data into blocks:
# 111 222 333
ns = geometry.angles.size // SPLIT_NUMBER
ray_trafos = [odl.tomo.RayTransform(reco_space,
geometry[i * ns:(i + 1) * ns])
for i in range(SPLIT_NUMBER)]
elif SPLIT_METHOD == 'interlaced':
# Split the data into slices:
# 123 123 123
ray_trafos = [odl.tomo.RayTransform(reco_space,
geometry[i::SPLIT_NUMBER])
for i in range(SPLIT_NUMBER)]
else:
raise ValueError('unknown data split {!r}: typo?'.format(SPLIT_METHOD))
# Create the artificial data.
data_spaces = [op.range for op in ray_trafos]
noisefree_data = [op(phantom) for op in ray_trafos]
data = [proj + 0.10 * np.ptp(proj) * odl.phantom.white_noise(proj.space)
for proj in noisefree_data]
# Functionals and operators for the total variation. This is the l1 norm of the
# (discretized) gradient of the reconstruction. For each of the dimensions
# we create two functionals and two operators.
# Start with empty lists ...
tv_functionals = []
tv_operators = []
tv_stepsizes = []
# ... and for each dimension of the reconstruction space ...
reco_shape = reco_space.shape
reco_dim = len(reco_shape)
for dim in range(reco_dim):
# ... add two operators taking only the even and odd elements,
# respectively, in that dimension.
partial_der = odl.PartialDerivative(
reco_space, dim, pad_mode='order0')
all_points = list(np.ndindex(reco_shape))
even_pts = [list(p) for p in all_points if p[dim] % 2 == 0]
even_pts = np.array(even_pts).T.tolist()
odd_pts = [list(p) for p in all_points if p[dim] % 2 == 1]
odd_pts = np.array(odd_pts).T.tolist()
op1 = reco_space.cell_sides[dim] * odl.SamplingOperator(
reco_space, even_pts) * partial_der
op2 = reco_space.cell_sides[dim] * odl.SamplingOperator(
reco_space, odd_pts) * partial_der
tv_functionals += [odl.solvers.L1Norm(op1.range),
odl.solvers.L1Norm(op2.range)]
tv_operators += [op1, op2]
tv_stepsizes += [0.5 / reco_shape[dim], 0.5 / reco_shape[dim]]
# Functional and operator enforcing the nonnegativity of the image.
nonneg_functional = odl.solvers.IndicatorNonnegativity(reco_space)
nonneg_operator = odl.IdentityOperator(reco_space)
nonneg_stepsize = 1.0
# ... and the data fit functionals. The coefficient is a regularization
# parameter, which determines the tradeoff between data fit and regularity.
data_fit_functionals = [1.0 *
odl.solvers.L2NormSquared(ds).translated(rhs)
for (ds, rhs) in zip(data_spaces, data)]
# In the stepsizes, we avoid the possible division by zero by adding a small
# positive value. The matrix corresponding to the operator `op` has only
# nonnegative entries, which ensures that the final results are positive.
data_fit_stepsizes = [1.0 / (1e-6 + op(op.adjoint(ds.one())))
for (ds, op) in zip(data_spaces, ray_trafos)]
# Alternative choice without vector-valued stepsizes could be
# data_fit_stepsizes = [1.0 / op.norm(estimate=True) ** 2 for op in ray_trafos]
# Now we build up the ingredients of our algorithm:
# Start at a zero image, ...
x = reco_space.zero()
# ... collect all the functionals, ...
g = [nonneg_functional] + data_fit_functionals + tv_functionals
# ... collect all the operators, ...
L = [nonneg_operator] + ray_trafos + tv_operators
# ... and collect all the inner stepsizes, which were chosen according to the
# properties of the operators in `L`.
inner_stepsizes = [nonneg_stepsize] + data_fit_stepsizes + tv_stepsizes
odl.solvers.adupdates(x, g, L, stepsize=1.0, inner_stepsizes=inner_stepsizes,
niter=5, random=RANDOM, callback=None,
callback_loop=None)
# Show the result within a window between zero and one.
x.show(vmin=0.0, vmax=1.0)
|
mpl-2.0
|
932cc1996868590bdb01d1f466a49820
| 42.006757
| 79
| 0.671956
| 3.213024
| false
| false
| false
| false
|
odlgroup/odl
|
odl/operator/tensor_ops.py
|
2
|
61291
|
# Copyright 2014-2020 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Operators defined for tensor fields."""
from __future__ import absolute_import, division, print_function
from numbers import Integral
import numpy as np
from odl.operator.operator import Operator
from odl.set import ComplexNumbers, RealNumbers
from odl.space import ProductSpace, tensor_space
from odl.space.base_tensors import TensorSpace
from odl.space.weighting import ArrayWeighting
from odl.util import dtype_repr, indent, signature_string, writable_array
__all__ = ('PointwiseNorm', 'PointwiseInner', 'PointwiseSum', 'MatrixOperator',
'SamplingOperator', 'WeightedSumSamplingOperator',
'FlatteningOperator')
_SUPPORTED_DIFF_METHODS = ('central', 'forward', 'backward')
class PointwiseTensorFieldOperator(Operator):
"""Abstract operator for point-wise tensor field manipulations.
A point-wise operator acts on a space of vector or tensor fields,
i.e. a power space ``X^d`` of a discretized function space ``X``.
Its range is the power space ``X^k`` with a possibly different
number ``k`` of components. For ``k == 1``, the base space
``X`` can be used instead.
For example, if ``X`` is a `DiscretizedSpace` space, then
``ProductSpace(X, d)`` is a valid domain for any positive integer
``d``. It is also possible to have tensor fields over tensor fields, i.e.
``ProductSpace(ProductSpace(X, n), m)``.
.. note::
It is allowed that ``domain``, ``range`` and ``base_space`` use
different ``dtype``. Correctness for, e.g., real-to-complex mappings
is not guaranteed in that case.
See Also
--------
odl.space.pspace.ProductSpace
"""
def __init__(self, domain, range, base_space, linear=False):
"""Initialize a new instance.
Parameters
----------
domain, range : {`ProductSpace`, `LinearSpace`}
Spaces of vector fields between which the operator maps.
They have to be either power spaces of the same base space
``X`` (up to ``dtype``), or the base space itself.
Empty product spaces are not allowed.
base_space : `LinearSpace`
The base space ``X``.
linear : bool, optional
If ``True``, assume that the operator is linear.
"""
if not is_compatible_space(domain, base_space):
raise ValueError(
'`domain` {!r} is not compatible with `base_space` {!r}'
''.format(domain, base_space))
if not is_compatible_space(range, base_space):
raise ValueError(
'`range` {!r} is not compatible with `base_space` {!r}'
''.format(range, base_space))
super(PointwiseTensorFieldOperator, self).__init__(
domain=domain, range=range, linear=linear)
self.__base_space = base_space
@property
def base_space(self):
"""Base space ``X`` of this operator's domain and range."""
return self.__base_space
class PointwiseNorm(PointwiseTensorFieldOperator):
"""Take the point-wise norm of a vector field.
This operator computes the (weighted) p-norm in each point of
a vector field, thus producing a scalar-valued function.
It implements the formulas ::
||F(x)|| = [ sum_j( w_j * |F_j(x)|^p ) ]^(1/p)
for ``p`` finite and ::
||F(x)|| = max_j( w_j * |F_j(x)| )
for ``p = inf``, where ``F`` is a vector field. This implies that
the `Operator.domain` is a power space of a discretized function
space. For example, if ``X`` is a `DiscretizedSpace` space, then
``ProductSpace(X, d)`` is a valid domain for any positive integer
``d``.
"""
def __init__(self, vfspace, exponent=None, weighting=None):
"""Initialize a new instance.
Parameters
----------
vfspace : `ProductSpace`
Space of vector fields on which the operator acts.
It has to be a product space of identical spaces, i.e. a
power space.
exponent : non-zero float, optional
Exponent of the norm in each point. Values between
0 and 1 are currently not supported due to numerical
instability.
Default: ``vfspace.exponent``
weighting : `array-like` or positive float, optional
Weighting array or constant for the norm. If an array is
given, its length must be equal to ``len(domain)``, and
all entries must be positive.
By default, the weights are taken from
``domain.weighting``. Note that this excludes unusual
weightings with custom inner product, norm or dist.
Examples
--------
We make a tiny vector field space in 2D and create the
standard point-wise norm operator on that space. The operator
maps a vector field to a scalar function:
>>> spc = odl.uniform_discr([-1, -1], [1, 1], (1, 2))
>>> vfspace = odl.ProductSpace(spc, 2)
>>> pw_norm = odl.PointwiseNorm(vfspace)
>>> pw_norm.range == spc
True
Now we can calculate the 2-norm in each point:
>>> x = vfspace.element([[[1, -4]],
... [[0, 3]]])
>>> print(pw_norm(x))
[[ 1., 5.]]
We can change the exponent either in the vector field space
or in the operator directly:
>>> vfspace = odl.ProductSpace(spc, 2, exponent=1)
>>> pw_norm = PointwiseNorm(vfspace)
>>> print(pw_norm(x))
[[ 1., 7.]]
>>> vfspace = odl.ProductSpace(spc, 2)
>>> pw_norm = PointwiseNorm(vfspace, exponent=1)
>>> print(pw_norm(x))
[[ 1., 7.]]
"""
if not isinstance(vfspace, ProductSpace):
raise TypeError('`vfspace` {!r} is not a ProductSpace '
'instance'.format(vfspace))
super(PointwiseNorm, self).__init__(
domain=vfspace, range=vfspace[0], base_space=vfspace[0],
linear=False)
# Need to check for product space shape once higher order tensors
# are implemented
if exponent is None:
if self.domain.exponent is None:
raise ValueError('cannot determine `exponent` from {}'
''.format(self.domain))
self._exponent = self.domain.exponent
elif exponent < 1:
raise ValueError('`exponent` smaller than 1 not allowed')
else:
self._exponent = float(exponent)
# Handle weighting, including sanity checks
if weighting is None:
# TODO: find a more robust way of getting the weights as an array
if hasattr(self.domain.weighting, 'array'):
self.__weights = self.domain.weighting.array
elif hasattr(self.domain.weighting, 'const'):
self.__weights = (self.domain.weighting.const *
np.ones(len(self.domain)))
else:
raise ValueError('weighting scheme {!r} of the domain does '
'not define a weighting array or constant'
''.format(self.domain.weighting))
elif np.isscalar(weighting):
if weighting <= 0:
raise ValueError('weighting constant must be positive, got '
'{}'.format(weighting))
self.__weights = float(weighting) * np.ones(len(self.domain))
else:
self.__weights = np.asarray(weighting, dtype='float64')
if (not np.all(self.weights > 0) or
not np.all(np.isfinite(self.weights))):
raise ValueError('weighting array {} contains invalid '
'entries'.format(weighting))
self.__is_weighted = not np.array_equiv(self.weights, 1.0)
@property
def exponent(self):
"""Exponent ``p`` of this norm."""
return self._exponent
@property
def weights(self):
"""Weighting array of this operator."""
return self.__weights
@property
def is_weighted(self):
"""``True`` if weighting is not 1 or all ones."""
return self.__is_weighted
def _call(self, f, out):
"""Implement ``self(f, out)``."""
if self.exponent == 1.0:
self._call_vecfield_1(f, out)
elif self.exponent == float('inf'):
self._call_vecfield_inf(f, out)
else:
self._call_vecfield_p(f, out)
def _call_vecfield_1(self, vf, out):
"""Implement ``self(vf, out)`` for exponent 1."""
vf[0].ufuncs.absolute(out=out)
if self.is_weighted:
out *= self.weights[0]
if len(self.domain) == 1:
return
tmp = self.range.element()
for fi, wi in zip(vf[1:], self.weights[1:]):
fi.ufuncs.absolute(out=tmp)
if self.is_weighted:
tmp *= wi
out += tmp
def _call_vecfield_inf(self, vf, out):
"""Implement ``self(vf, out)`` for exponent ``inf``."""
vf[0].ufuncs.absolute(out=out)
if self.is_weighted:
out *= self.weights[0]
if len(self.domain) == 1:
return
tmp = self.range.element()
for vfi, wi in zip(vf[1:], self.weights[1:]):
vfi.ufuncs.absolute(out=tmp)
if self.is_weighted:
tmp *= wi
out.ufuncs.maximum(tmp, out=out)
def _call_vecfield_p(self, vf, out):
"""Implement ``self(vf, out)`` for exponent 1 < p < ``inf``."""
# Optimization for 1 component - just absolute value (maybe weighted)
if len(self.domain) == 1:
vf[0].ufuncs.absolute(out=out)
if self.is_weighted:
out *= self.weights[0] ** (1 / self.exponent)
return
# Initialize out, avoiding one copy
self._abs_pow_ufunc(vf[0], out=out, p=self.exponent)
if self.is_weighted:
out *= self.weights[0]
tmp = self.range.element()
for fi, wi in zip(vf[1:], self.weights[1:]):
self._abs_pow_ufunc(fi, out=tmp, p=self.exponent)
if self.is_weighted:
tmp *= wi
out += tmp
self._abs_pow_ufunc(out, out=out, p=(1 / self.exponent))
def _abs_pow_ufunc(self, fi, out, p):
"""Compute |F_i(x)|^p point-wise and write to ``out``."""
# Optimization for very common cases
if p == 0.5:
fi.ufuncs.absolute(out=out)
out.ufuncs.sqrt(out=out)
elif p == 2.0 and self.base_space.field == RealNumbers():
fi.multiply(fi, out=out)
else:
fi.ufuncs.absolute(out=out)
out.ufuncs.power(p, out=out)
def derivative(self, vf):
"""Derivative of the point-wise norm operator at ``vf``.
The derivative at ``F`` of the point-wise norm operator ``N``
with finite exponent ``p`` and weights ``w`` is the pointwise
inner product with the vector field ::
x --> N(F)(x)^(1-p) * [ F_j(x) * |F_j(x)|^(p-2) ]_j
Note that this is not well-defined for ``F = 0``. If ``p < 2``,
any zero component will result in a singularity.
Parameters
----------
vf : `domain` `element-like`
Vector field ``F`` at which to evaluate the derivative.
Returns
-------
deriv : `PointwiseInner`
Derivative operator at the given point ``vf``.
Raises
------
NotImplementedError
* if the vector field space is complex, since the derivative
is not linear in that case
* if the exponent is ``inf``
"""
if self.domain.field == ComplexNumbers():
raise NotImplementedError('operator not Frechet-differentiable '
'on a complex space')
if self.exponent == float('inf'):
raise NotImplementedError('operator not Frechet-differentiable '
'for exponent = inf')
vf = self.domain.element(vf)
vf_pwnorm_fac = self(vf)
if self.exponent != 2: # optimize away most common case.
vf_pwnorm_fac **= (self.exponent - 1)
inner_vf = vf.copy()
for gi in inner_vf:
gi *= gi.ufuncs.absolute().ufuncs.power(self.exponent - 2)
if self.exponent >= 2:
# Any component that is zero is not divided with
nz = (vf_pwnorm_fac.asarray() != 0)
gi[nz] /= vf_pwnorm_fac[nz]
else:
# For exponents < 2 there will be a singularity if any
# component is zero. This results in inf or nan. See the
# documentation for further details.
gi /= vf_pwnorm_fac
return PointwiseInner(self.domain, inner_vf, weighting=self.weights)
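# Hedged NumPy-only sketch (illustrative names, not part of the API) of the
# weighted p-norm that PointwiseNorm evaluates in each point:
#
#     F = np.array([[1.0, -4.0],      # component 0 at the two points
#                   [0.0, 3.0]])      # component 1
#     w = np.array([1.0, 1.0])        # per-component weights
#     p = 2.0
#     pw_norm = np.sum(w[:, None] * np.abs(F) ** p, axis=0) ** (1 / p)
#     # pw_norm == array([ 1.,  5.]), matching the doctest in __init__ above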
class PointwiseInnerBase(PointwiseTensorFieldOperator):
"""Base class for `PointwiseInner` and `PointwiseInnerAdjoint`.
Implemented to allow code reuse between the classes.
"""
def __init__(self, adjoint, vfspace, vecfield, weighting=None):
"""Initialize a new instance.
All parameters are given according to the specifics of the "usual"
operator. The ``adjoint`` parameter is used to control conversions
for the inverse transform.
Parameters
----------
adjoint : bool
``True`` if the operator should be the adjoint, ``False``
otherwise.
vfspace : `ProductSpace`
Space of vector fields on which the operator acts.
It has to be a product space of identical spaces, i.e. a
power space.
vecfield : ``vfspace`` `element-like`
Vector field with which to calculate the point-wise inner
product of an input vector field
weighting : `array-like` or float, optional
Weighting array or constant for the norm. If an array is
given, its length must be equal to ``len(domain)``.
            By default, the weights are taken from
``domain.weighting``. Note that this excludes unusual
weightings with custom inner product, norm or dist.
"""
if not isinstance(vfspace, ProductSpace):
            raise TypeError('`vfspace` {!r} is not a ProductSpace '
'instance'.format(vfspace))
if adjoint:
super(PointwiseInnerBase, self).__init__(
domain=vfspace[0], range=vfspace, base_space=vfspace[0],
linear=True)
else:
super(PointwiseInnerBase, self).__init__(
domain=vfspace, range=vfspace[0], base_space=vfspace[0],
linear=True)
# Bail out if the space is complex but we cannot take the complex
# conjugate.
if (vfspace.field == ComplexNumbers() and
not hasattr(self.base_space.element_type, 'conj')):
raise NotImplementedError(
'base space element type {!r} does not implement conj() '
'method required for complex inner products'
''.format(self.base_space.element_type))
self._vecfield = vfspace.element(vecfield)
# Handle weighting, including sanity checks
if weighting is None:
if hasattr(vfspace.weighting, 'array'):
self.__weights = vfspace.weighting.array
elif hasattr(vfspace.weighting, 'const'):
self.__weights = (vfspace.weighting.const *
np.ones(len(vfspace)))
else:
raise ValueError('weighting scheme {!r} of the domain does '
'not define a weighting array or constant'
''.format(vfspace.weighting))
elif np.isscalar(weighting):
self.__weights = float(weighting) * np.ones(len(vfspace))
else:
self.__weights = np.asarray(weighting, dtype='float64')
self.__is_weighted = not np.array_equiv(self.weights, 1.0)
@property
def vecfield(self):
"""Fixed vector field ``G`` of this inner product."""
return self._vecfield
@property
def weights(self):
"""Weighting array of this operator."""
return self.__weights
@property
def is_weighted(self):
"""``True`` if weighting is not 1 or all ones."""
return self.__is_weighted
@property
def adjoint(self):
"""Adjoint operator."""
raise NotImplementedError('abstract method')
class PointwiseInner(PointwiseInnerBase):
"""Take the point-wise inner product with a given vector field.
This operator takes the (weighted) inner product ::
<F(x), G(x)> = sum_j ( w_j * F_j(x) * conj(G_j(x)) )
for a given vector field ``G``, where ``F`` is the vector field
acting as a variable to this operator.
This implies that the `Operator.domain` is a power space of a
discretized function space. For example, if ``X`` is a `DiscretizedSpace`
space, then ``ProductSpace(X, d)`` is a valid domain for any
positive integer ``d``.
"""
def __init__(self, vfspace, vecfield, weighting=None):
"""Initialize a new instance.
Parameters
----------
vfspace : `ProductSpace`
Space of vector fields on which the operator acts.
It has to be a product space of identical spaces, i.e. a
power space.
vecfield : ``vfspace`` `element-like`
Vector field with which to calculate the point-wise inner
product of an input vector field
weighting : `array-like` or float, optional
Weighting array or constant for the norm. If an array is
given, its length must be equal to ``len(domain)``, and
all entries must be positive.
            By default, the weights are taken from
``domain.weighting``. Note that this excludes unusual
weightings with custom inner product, norm or dist.
Examples
--------
We make a tiny vector field space in 2D and create the
point-wise inner product operator with a fixed vector field.
The operator maps a vector field to a scalar function:
>>> spc = odl.uniform_discr([-1, -1], [1, 1], (1, 2))
>>> vfspace = odl.ProductSpace(spc, 2)
>>> fixed_vf = np.array([[[0, 1]],
... [[1, -1]]])
>>> pw_inner = PointwiseInner(vfspace, fixed_vf)
>>> pw_inner.range == spc
True
Now we can calculate the inner product in each point:
>>> x = vfspace.element([[[1, -4]],
... [[0, 3]]])
>>> print(pw_inner(x))
[[ 0., -7.]]
"""
super(PointwiseInner, self).__init__(
adjoint=False, vfspace=vfspace, vecfield=vecfield,
weighting=weighting)
@property
def vecfield(self):
"""Fixed vector field ``G`` of this inner product."""
return self._vecfield
def _call(self, vf, out):
"""Implement ``self(vf, out)``."""
if self.domain.field == ComplexNumbers():
vf[0].multiply(self._vecfield[0].conj(), out=out)
else:
vf[0].multiply(self._vecfield[0], out=out)
if self.is_weighted:
out *= self.weights[0]
if len(self.domain) == 1:
return
tmp = self.range.element()
for vfi, gi, wi in zip(vf[1:], self.vecfield[1:],
self.weights[1:]):
if self.domain.field == ComplexNumbers():
vfi.multiply(gi.conj(), out=tmp)
else:
vfi.multiply(gi, out=tmp)
if self.is_weighted:
tmp *= wi
out += tmp
@property
def adjoint(self):
"""Adjoint of this operator.
Returns
-------
adjoint : `PointwiseInnerAdjoint`
"""
return PointwiseInnerAdjoint(
sspace=self.base_space, vecfield=self.vecfield,
vfspace=self.domain, weighting=self.weights)
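# Hedged NumPy sketch of the weighted point-wise inner product computed by
# PointwiseInner (illustrative arrays only):
#
#     F = np.array([[1.0, -4.0], [0.0, 3.0]])   # variable vector field
#     G = np.array([[0.0, 1.0], [1.0, -1.0]])   # fixed vector field
#     w = np.array([1.0, 1.0])
#     pw_inner = np.sum(w[:, None] * F * np.conj(G), axis=0)
#     # pw_inner == array([ 0., -7.]), matching the doctest above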
class PointwiseInnerAdjoint(PointwiseInnerBase):
"""Adjoint of the point-wise inner product operator.
The adjoint of the inner product operator is a mapping ::
A^* : X --> X^d
If the vector field space ``X^d`` is weighted by a vector ``v``,
    the adjoint, applied to a function ``h`` from ``X``, is the vector
field ::
x --> h(x) * (w / v) * G(x),
where ``G`` and ``w`` are the vector field and weighting from the
inner product operator, resp., and all multiplications are understood
component-wise.
"""
def __init__(self, sspace, vecfield, vfspace=None, weighting=None):
"""Initialize a new instance.
Parameters
----------
sspace : `LinearSpace`
"Scalar" space on which the operator acts
vecfield : range `element-like`
Vector field of the point-wise inner product operator
vfspace : `ProductSpace`, optional
Space of vector fields to which the operator maps. It must
be a power space with ``sspace`` as base space.
This option is intended to enforce an operator range
with a certain weighting.
Default: ``ProductSpace(space, len(vecfield),
weighting=weighting)``
weighting : `array-like` or float, optional
Weighting array or constant of the inner product operator.
If an array is given, its length must be equal to
``len(vecfield)``.
            By default, the weights are taken from
``range.weighting`` if applicable. Note that this excludes
unusual weightings with custom inner product, norm or dist.
"""
if vfspace is None:
vfspace = ProductSpace(sspace, len(vecfield), weighting=weighting)
else:
if not isinstance(vfspace, ProductSpace):
raise TypeError('`vfspace` {!r} is not a '
'ProductSpace instance'.format(vfspace))
if vfspace[0] != sspace:
raise ValueError('base space of the range is different from '
'the given scalar space ({!r} != {!r})'
''.format(vfspace[0], sspace))
super(PointwiseInnerAdjoint, self).__init__(
adjoint=True, vfspace=vfspace, vecfield=vecfield,
weighting=weighting)
# Get weighting from range
if hasattr(self.range.weighting, 'array'):
self.__ran_weights = self.range.weighting.array
elif hasattr(self.range.weighting, 'const'):
self.__ran_weights = (self.range.weighting.const *
np.ones(len(self.range)))
else:
raise ValueError('weighting scheme {!r} of the range does '
'not define a weighting array or constant'
''.format(self.range.weighting))
def _call(self, f, out):
"""Implement ``self(vf, out)``."""
for vfi, oi, ran_wi, dom_wi in zip(self.vecfield, out,
self.__ran_weights, self.weights):
vfi.multiply(f, out=oi)
if not np.isclose(ran_wi, dom_wi):
oi *= dom_wi / ran_wi
@property
def adjoint(self):
"""Adjoint of this operator.
Returns
-------
adjoint : `PointwiseInner`
"""
return PointwiseInner(vfspace=self.range, vecfield=self.vecfield,
weighting=self.weights)
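# Hedged NumPy sketch of the adjoint formula quoted in the class docstring,
# x --> h(x) * (w / v) * G(x), with purely illustrative arrays:
#
#     h = np.array([2.0, 3.0])                  # scalar function values
#     G = np.array([[0.0, 1.0], [1.0, -1.0]])   # fixed vector field
#     w = np.array([1.0, 1.0])                  # inner-product weights
#     v = np.array([0.5, 0.5])                  # weights of the range X^d
#     adj = h[None, :] * (w / v)[:, None] * G   # row j is h * (w_j/v_j) * G_j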
# TODO: Make this an optimized operator on its own.
class PointwiseSum(PointwiseInner):
"""Take the point-wise sum of a vector field.
This operator takes the (weighted) sum ::
sum(F(x)) = [ sum_j( w_j * F_j(x) ) ]
where ``F`` is a vector field. This implies that
the `Operator.domain` is a power space of a discretized function
space. For example, if ``X`` is a `DiscretizedSpace` space, then
``ProductSpace(X, d)`` is a valid domain for any positive integer
``d``.
"""
def __init__(self, vfspace, weighting=None):
"""Initialize a new instance.
Parameters
----------
vfspace : `ProductSpace`
Space of vector fields on which the operator acts.
It has to be a product space of identical spaces, i.e. a
power space.
weighting : `array-like` or float, optional
Weighting array or constant for the sum. If an array is
given, its length must be equal to ``len(domain)``.
            By default, the weights are taken from
``domain.weighting``. Note that this excludes unusual
weightings with custom inner product, norm or dist.
Examples
--------
We make a tiny vector field space in 2D and create the
standard point-wise sum operator on that space. The operator
maps a vector field to a scalar function:
>>> spc = odl.uniform_discr([-1, -1], [1, 1], (1, 2))
>>> vfspace = odl.ProductSpace(spc, 2)
>>> pw_sum = PointwiseSum(vfspace)
>>> pw_sum.range == spc
True
Now we can calculate the sum in each point:
>>> x = vfspace.element([[[1, -4]],
... [[0, 3]]])
>>> print(pw_sum(x))
[[ 1., -1.]]
"""
if not isinstance(vfspace, ProductSpace):
raise TypeError('`vfspace` {!r} is not a ProductSpace '
'instance'.format(vfspace))
ones = vfspace.one()
super(PointwiseSum, self).__init__(
vfspace, vecfield=ones, weighting=weighting)
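# Hedged NumPy sketch of the (weighted) point-wise sum realized above as an
# inner product with the constant one-vector field:
#
#     F = np.array([[1.0, -4.0], [0.0, 3.0]])
#     w = np.array([1.0, 1.0])
#     pw_sum = np.sum(w[:, None] * F, axis=0)
#     # pw_sum == array([ 1., -1.]), matching the doctest above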
class MatrixOperator(Operator):
"""A matrix acting as a linear operator.
This operator uses a matrix to represent an operator, and get its
adjoint and inverse by doing computations on the matrix. This is in
general a rather slow and memory-inefficient approach, and users are
recommended to use other alternatives if possible.
"""
def __init__(self, matrix, domain=None, range=None, axis=0):
r"""Initialize a new instance.
Parameters
----------
matrix : `array-like` or `scipy.sparse.base.spmatrix`
2-dimensional array representing the linear operator.
For Scipy sparse matrices only tensor spaces with
``ndim == 1`` are allowed as ``domain``.
domain : `TensorSpace`, optional
Space of elements on which the operator can act. Its
``dtype`` must be castable to ``range.dtype``.
For the default ``None``, a space with 1 axis and size
``matrix.shape[1]`` is used, together with the matrix'
data type.
range : `TensorSpace`, optional
Space of elements on to which the operator maps. Its
``shape`` and ``dtype`` attributes must match those
of the result of the multiplication.
For the default ``None``, the range is inferred from
``matrix``, ``domain`` and ``axis``.
axis : int, optional
Sum over this axis of an input tensor in the
multiplication.
Examples
--------
By default, ``domain`` and ``range`` are spaces of with one axis:
>>> m = np.ones((3, 4))
>>> op = MatrixOperator(m)
>>> op.domain
rn(4)
>>> op.range
rn(3)
>>> op([1, 2, 3, 4])
rn(3).element([ 10., 10., 10.])
For multi-dimensional arrays (tensors), the summation
(contraction) can be performed along a specific axis. In
this example, the number of matrix rows (4) must match the
domain shape entry in the given axis:
>>> dom = odl.rn((5, 4, 4)) # can use axis=1 or axis=2
>>> op = MatrixOperator(m, domain=dom, axis=1)
>>> op(dom.one()).shape
(5, 3, 4)
>>> op = MatrixOperator(m, domain=dom, axis=2)
>>> op(dom.one()).shape
(5, 4, 3)
The operator also works on `uniform_discr` type spaces. Note,
however, that the ``weighting`` of the domain is propagated to
the range by default, in order to keep the correspondence between
adjoint and transposed matrix:
>>> space = odl.uniform_discr(0, 1, 4)
>>> op = MatrixOperator(m, domain=space)
>>> op(space.one())
rn(3, weighting=0.25).element([ 4., 4., 4.])
>>> np.array_equal(op.adjoint.matrix, m.T)
True
Notes
-----
For a matrix :math:`A \in \mathbb{F}^{n \times m}`, the
operation on a tensor :math:`T \in \mathbb{F}^{n_1 \times
\dots \times n_d}` is defined as the summation
.. math::
(A \cdot T)_{i_1, \dots, i_k, \dots, i_d} =
\sum_{j=1}^m A_{i_k j} T_{i_1, \dots, j, \dots, i_d}.
It produces a new tensor :math:`A \cdot T \in \mathbb{F}^{
n_1 \times \dots \times n \times \dots \times n_d}`.
"""
# Lazy import to improve `import odl` time
import scipy.sparse
if scipy.sparse.isspmatrix(matrix):
self.__matrix = matrix
else:
self.__matrix = np.array(matrix, copy=False, ndmin=2)
self.__axis, axis_in = int(axis), axis
if self.axis != axis_in:
raise ValueError('`axis` must be integer, got {}'.format(axis_in))
if self.matrix.ndim != 2:
raise ValueError('`matrix` has {} axes instead of 2'
''.format(self.matrix.ndim))
# Infer or check domain
if domain is None:
domain = tensor_space((self.matrix.shape[1],),
dtype=self.matrix.dtype)
else:
if not isinstance(domain, TensorSpace):
raise TypeError('`domain` must be a `TensorSpace` '
'instance, got {!r}'.format(domain))
if scipy.sparse.isspmatrix(self.matrix) and domain.ndim > 1:
raise ValueError('`domain.ndim` > 1 unsupported for '
'scipy sparse matrices')
if domain.shape[axis] != self.matrix.shape[1]:
raise ValueError('`domain.shape[axis]` not equal to '
'`matrix.shape[1]` ({} != {})'
''.format(domain.shape[axis],
self.matrix.shape[1]))
range_shape = list(domain.shape)
range_shape[self.axis] = self.matrix.shape[0]
if range is None:
# Infer range
range_dtype = np.promote_types(self.matrix.dtype, domain.dtype)
if (range_shape != domain.shape and
isinstance(domain.weighting, ArrayWeighting)):
# Cannot propagate weighting due to size mismatch.
weighting = None
else:
weighting = domain.weighting
range = tensor_space(range_shape, dtype=range_dtype,
weighting=weighting,
exponent=domain.exponent)
else:
# Check consistency of range
if not isinstance(range, TensorSpace):
raise TypeError('`range` must be a `TensorSpace` instance, '
'got {!r}'.format(range))
if range.shape != tuple(range_shape):
raise ValueError('expected `range.shape` = {}, got {}'
''.format(tuple(range_shape), range.shape))
# Check compatibility of data types
result_dtype = np.promote_types(domain.dtype, self.matrix.dtype)
if not np.can_cast(result_dtype, range.dtype):
raise ValueError('result data type {} cannot be safely cast to '
'range data type {}'
''.format(dtype_repr(result_dtype),
dtype_repr(range.dtype)))
super(MatrixOperator, self).__init__(domain, range, linear=True)
@property
def matrix(self):
"""Matrix representing this operator."""
return self.__matrix
@property
def axis(self):
"""Axis of domain elements over which is summed."""
return self.__axis
@property
def adjoint(self):
"""Adjoint operator represented by the adjoint matrix.
Returns
-------
adjoint : `MatrixOperator`
"""
return MatrixOperator(self.matrix.conj().T,
domain=self.range, range=self.domain,
axis=self.axis)
@property
def inverse(self):
"""Inverse operator represented by the inverse matrix.
Taking the inverse causes sparse matrices to become dense and is
generally very heavy computationally since the matrix is inverted
numerically (an O(n^3) operation). It is recommended to instead
use one of the solvers available in the ``odl.solvers`` package.
Returns
-------
inverse : `MatrixOperator`
"""
# Lazy import to improve `import odl` time
import scipy.sparse
if scipy.sparse.isspmatrix(self.matrix):
dense_matrix = self.matrix.toarray()
else:
dense_matrix = self.matrix
return MatrixOperator(np.linalg.inv(dense_matrix),
domain=self.range, range=self.domain,
axis=self.axis)
def _call(self, x, out=None):
"""Return ``self(x[, out])``."""
# Lazy import to improve `import odl` time
import scipy.sparse
if out is None:
if scipy.sparse.isspmatrix(self.matrix):
out = self.matrix.dot(x)
else:
dot = np.tensordot(self.matrix, x, axes=(1, self.axis))
# New axis ends up as first, need to swap it to its place
out = np.moveaxis(dot, 0, self.axis)
else:
if scipy.sparse.isspmatrix(self.matrix):
# Unfortunately, there is no native in-place dot product for
# sparse matrices
out[:] = self.matrix.dot(x)
elif self.range.ndim == 1:
with writable_array(out) as out_arr:
self.matrix.dot(x, out=out_arr)
else:
# Could use einsum to have out, but it's damn slow
# TODO: investigate speed issue
dot = np.tensordot(self.matrix, x, axes=(1, self.axis))
# New axis ends up as first, need to move it to its place
out[:] = np.moveaxis(dot, 0, self.axis)
return out
def __repr__(self):
"""Return ``repr(self)``."""
# Lazy import to improve `import odl` time
import scipy.sparse
# Matrix printing itself in an executable way (for dense matrix)
if scipy.sparse.isspmatrix(self.matrix):
# Don't convert to dense, can take forever
matrix_str = repr(self.matrix)
else:
matrix_str = np.array2string(self.matrix, separator=', ')
posargs = [matrix_str]
# Optional arguments with defaults, inferred from the matrix
range_shape = list(self.domain.shape)
range_shape[self.axis] = self.matrix.shape[0]
optargs = [
('domain', self.domain, tensor_space(self.matrix.shape[1],
self.matrix.dtype)),
('range', self.range, tensor_space(range_shape,
self.matrix.dtype)),
('axis', self.axis, 0)
]
inner_str = signature_string(posargs, optargs, sep=[', ', ', ', ',\n'],
mod=[['!s'], ['!r', '!r', '']])
return '{}(\n{}\n)'.format(self.__class__.__name__, indent(inner_str))
def __str__(self):
"""Return ``str(self)``."""
return repr(self)
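# Hedged sketch of the axis contraction that MatrixOperator._call performs for
# multi-axis inputs (shapes mirror the `axis=1` doctest above):
#
#     A = np.ones((3, 4))
#     T = np.ones((5, 4, 4))
#     tmp = np.tensordot(A, T, axes=(1, 1))     # contracted axis becomes axis 0
#     out = np.moveaxis(tmp, 0, 1)              # move it back to position 1
#     # out.shape == (5, 3, 4)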
def _normalize_sampling_points(sampling_points, ndim):
"""Normalize points to an ndim-long list of linear index arrays.
This helper converts sampling indices for `SamplingOperator` from
integers or array-like objects to a list of length ``ndim``, where
each entry is a `numpy.ndarray` with ``dtype=int``.
The function also checks if all arrays have equal lengths, and that
    they fulfill ``array.ndim=1`` (or ``size=0`` if ``ndim == 0``).
The result of this normalization is intended to be used for indexing
an ``ndim``-dimensional array at ``sampling_points`` via NumPy fancy
indexing, i.e., ``result = ndim_array[sampling_points]``.
"""
sampling_points_in = sampling_points
if ndim == 0:
sampling_points = [np.array(sampling_points, dtype=int, copy=False)]
if sampling_points[0].size != 0:
raise ValueError('`sampling_points` must be empty for '
'0-dim. `domain`')
elif ndim == 1:
if isinstance(sampling_points, Integral):
sampling_points = (sampling_points,)
sampling_points = np.array(sampling_points, dtype=int, copy=False,
ndmin=1)
# Handle possible list of length one
if sampling_points.ndim == 2 and sampling_points.shape[0] == 1:
sampling_points = sampling_points[0]
sampling_points = [sampling_points]
if sampling_points[0].ndim > 1:
raise ValueError('expected 1D index (array), got {}'
''.format(sampling_points_in))
else:
try:
iter(sampling_points)
except TypeError:
raise TypeError('`sampling_points` must be a sequence '
'for domain with ndim > 1')
else:
if np.ndim(sampling_points) == 1:
sampling_points = [np.array(p, dtype=int)
for p in sampling_points]
else:
sampling_points = [
np.array(pts, dtype=int, copy=False, ndmin=1)
for pts in sampling_points]
if any(pts.ndim != 1 for pts in sampling_points):
raise ValueError(
'index arrays in `sampling_points` must be 1D, '
'got {!r}'.format(sampling_points_in))
return sampling_points
class SamplingOperator(Operator):
"""Operator that samples coefficients.
The operator is defined by ::
SamplingOperator(f) == c * f[sampling_points]
with the weight ``c`` being determined by the variant. By choosing
``c = 1``, this operator approximates point evaluations or inner
products with Dirac deltas, see option ``variant='point_eval'``.
By choosing ``c = cell_volume``, it approximates the integration of
``f`` over the indexed cells, see option ``variant='integrate'``.
"""
def __init__(self, domain, sampling_points, variant='point_eval'):
"""Initialize a new instance.
Parameters
----------
domain : `TensorSpace`
Set of elements on which this operator acts.
sampling_points : 1D `array-like` or sequence of 1D array-likes
Indices that determine the sampling points.
In n dimensions, it should be a sequence of n arrays, where
each member array is of equal length N. The indexed positions
are ``(arr1[i], arr2[i], ..., arrn[i])``, in total N
points.
If ``domain`` is one-dimensional, a single array-like can be
            used. Likewise, a single point can be given as an integer in 1D,
            and as an array-like sequence in nD.
variant : {'point_eval', 'integrate'}, optional
For ``'point_eval'`` this operator performs the sampling by
            evaluating the function at the sampling points. The
``'integrate'`` variant approximates integration by
multiplying point evaluation with the cell volume.
Examples
--------
Sampling in 1d can be done with a single index (an int) or a
sequence of such:
>>> space = odl.uniform_discr(0, 1, 4)
>>> op = odl.SamplingOperator(space, sampling_points=1)
>>> x = space.element([1, 2, 3, 4])
>>> op(x)
rn(1).element([ 2.])
>>> op = odl.SamplingOperator(space, sampling_points=[1, 2, 1])
>>> op(x)
rn(3).element([ 2., 3., 2.])
There are two variants ``'point_eval'`` (default) and
``'integrate'``, where the latter scales values by the cell
volume to approximate the integral over the cells of the points:
>>> op = odl.SamplingOperator(space, sampling_points=[1, 2, 1],
... variant='integrate')
>>> space.cell_volume # the scaling constant
0.25
>>> op(x)
rn(3).element([ 0.5 , 0.75, 0.5 ])
In higher dimensions, a sequence of index array-likes must be
given, or a single sequence for a single point:
>>> space = odl.uniform_discr([0, 0], [1, 1], (2, 3))
>>> # Sample at the index (0, 2)
>>> op = odl.SamplingOperator(space, sampling_points=[0, 2])
>>> x = space.element([[1, 2, 3],
... [4, 5, 6]])
>>> op(x)
rn(1).element([ 3.])
>>> sampling_points = [[0, 1, 1], # indices (0, 2), (1, 1), (1, 0)
... [2, 1, 0]]
>>> op = odl.SamplingOperator(space, sampling_points)
>>> op(x)
rn(3).element([ 3., 5., 4.])
"""
if not isinstance(domain, TensorSpace):
raise TypeError('`domain` must be a `TensorSpace` instance, got '
'{!r}'.format(domain))
self.__sampling_points = _normalize_sampling_points(sampling_points,
domain.ndim)
# Flatten indices during init for faster indexing later
indices_flat = np.ravel_multi_index(self.sampling_points,
dims=domain.shape)
if np.isscalar(indices_flat):
self._indices_flat = np.array([indices_flat], dtype=int)
else:
self._indices_flat = indices_flat
self.__variant = str(variant).lower()
if self.variant not in ('point_eval', 'integrate'):
raise ValueError('`variant` {!r} not understood'.format(variant))
ran = tensor_space(self.sampling_points[0].size, dtype=domain.dtype)
super(SamplingOperator, self).__init__(domain, ran, linear=True)
@property
def variant(self):
"""Weighting scheme for the sampling operator."""
return self.__variant
@property
def sampling_points(self):
"""Indices where to sample the function."""
return self.__sampling_points
def _call(self, x):
"""Return values at indices, possibly weighted."""
out = x.asarray().ravel()[self._indices_flat]
if self.variant == 'point_eval':
weights = 1.0
elif self.variant == 'integrate':
weights = getattr(self.domain, 'cell_volume', 1.0)
else:
raise RuntimeError('bad variant {!r}'.format(self.variant))
if weights != 1.0:
out *= weights
return out
@property
def adjoint(self):
"""Adjoint of the sampling operator, a `WeightedSumSamplingOperator`.
If each sampling point occurs only once, the adjoint consists
in inserting the given values into the output at the sampling
points. Duplicate sampling points are weighted with their
multiplicity.
Examples
--------
>>> space = odl.uniform_discr([-1, -1], [1, 1], shape=(2, 3))
>>> sampling_points = [[0, 1, 1, 0],
... [0, 1, 2, 0]]
>>> op = odl.SamplingOperator(space, sampling_points)
>>> x = space.element([[1, 2, 3],
... [4, 5, 6]])
>>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10
True
The ``'integrate'`` variant adjoint puts ones at the indices in
``sampling_points``, multiplied by their multiplicity:
>>> op = odl.SamplingOperator(space, sampling_points,
... variant='integrate')
>>> op.adjoint(op.range.one()) # (0, 0) occurs twice
uniform_discr([-1., -1.], [ 1., 1.], (2, 3)).element(
[[ 2., 0., 0.],
[ 0., 1., 1.]]
)
>>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10
True
"""
if self.variant == 'point_eval':
variant = 'dirac'
elif self.variant == 'integrate':
variant = 'char_fun'
else:
raise RuntimeError('bad variant {!r}'.format(self.variant))
return WeightedSumSamplingOperator(self.domain, self.sampling_points,
variant)
def __repr__(self):
"""Return ``repr(self)``."""
posargs = [self.domain, self.sampling_points]
optargs = [('variant', self.variant, 'point_eval')]
sig_str = signature_string(posargs, optargs, mod=['!r', ''],
sep=[',\n', '', ',\n'])
return '{}(\n{}\n)'.format(self.__class__.__name__, indent(sig_str))
def __str__(self):
"""Return ``str(self)``."""
return repr(self)
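# Hedged NumPy sketch of the flat fancy indexing used by SamplingOperator._call
# (arrays are illustrative and correspond to the 2D doctest above):
#
#     x = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
#     pts = (np.array([0, 1, 1]), np.array([2, 1, 0]))
#     flat = np.ravel_multi_index(pts, dims=x.shape)
#     out = x.ravel()[flat]
#     # out == array([ 3.,  5.,  4.])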
class WeightedSumSamplingOperator(Operator):
r"""Operator computing the sum of coefficients at sampling locations.
This operator is the adjoint of `SamplingOperator`.
Notes
-----
The weighted sum sampling operator for a sequence
:math:`I = (i_n)_{n=1}^N`
of indices (possibly with duplicates) is given by
.. math::
W_I(g)(x) = \sum_{i \in I} d_i(x) g_i,
where :math:`g \in \mathbb{F}^N` is the value vector, and
:math:`d_i` is either a Dirac delta or a characteristic function of
the cell centered around the point indexed by :math:`i`.
"""
def __init__(self, range, sampling_points, variant='char_fun'):
"""Initialize a new instance.
Parameters
----------
range : `TensorSpace`
Set of elements into which this operator maps.
sampling_points : 1D `array-like` or sequence of 1D array-likes
Indices that determine the sampling points.
In n dimensions, it should be a sequence of n arrays, where
each member array is of equal length N. The indexed positions
are ``(arr1[i], arr2[i], ..., arrn[i])``, in total N
points.
If ``range`` is one-dimensional, a single array-like can be
            used. Likewise, a single point can be given as an integer in 1D,
            and as an array-like sequence in nD.
variant : {'char_fun', 'dirac'}, optional
This option determines which function to sum over.
Examples
--------
In 1d, a single index (an int) or a sequence of such can be used
for indexing.
>>> space = odl.uniform_discr(0, 1, 4)
>>> op = odl.WeightedSumSamplingOperator(space, sampling_points=1)
>>> op.domain
rn(1)
>>> x = op.domain.element([1])
>>> # Put value 1 at index 1
>>> op(x)
uniform_discr(0.0, 1.0, 4).element([ 0., 1., 0., 0.])
>>> op = odl.WeightedSumSamplingOperator(space,
... sampling_points=[1, 2, 1])
>>> op.domain
rn(3)
>>> x = op.domain.element([1, 0.5, 0.25])
>>> # Index 1 occurs twice and gets two contributions (1 and 0.25)
>>> op(x)
uniform_discr(0.0, 1.0, 4).element([ 0. , 1.25, 0.5 , 0. ])
The ``'dirac'`` variant scales the values by the reciprocal
cell volume of the operator range:
>>> op = odl.WeightedSumSamplingOperator(
... space, sampling_points=[1, 2, 1], variant='dirac')
>>> x = op.domain.element([1, 0.5, 0.25])
>>> 1 / op.range.cell_volume # the scaling constant
4.0
>>> op(x)
uniform_discr(0.0, 1.0, 4).element([ 0., 5., 2., 0.])
In higher dimensions, a sequence of index array-likes must be
given, or a single sequence for a single point:
>>> space = odl.uniform_discr([0, 0], [1, 1], (2, 3))
>>> # Sample at the index (0, 2)
>>> op = odl.WeightedSumSamplingOperator(space,
... sampling_points=[0, 2])
>>> x = op.domain.element([1])
>>> # Insert the value 1 at index (0, 2)
>>> op(x)
uniform_discr([ 0., 0.], [ 1., 1.], (2, 3)).element(
[[ 0., 0., 1.],
[ 0., 0., 0.]]
)
>>> sampling_points = [[0, 1], # indices (0, 2) and (1, 1)
... [2, 1]]
>>> op = odl.WeightedSumSamplingOperator(space, sampling_points)
>>> x = op.domain.element([1, 2])
>>> op(x)
uniform_discr([ 0., 0.], [ 1., 1.], (2, 3)).element(
[[ 0., 0., 1.],
[ 0., 2., 0.]]
)
"""
if not isinstance(range, TensorSpace):
raise TypeError('`range` must be a `TensorSpace` instance, got '
'{!r}'.format(range))
self.__sampling_points = _normalize_sampling_points(sampling_points,
range.ndim)
# Convert a list of index arrays to linear index array
indices_flat = np.ravel_multi_index(self.sampling_points,
dims=range.shape)
if np.isscalar(indices_flat):
self._indices_flat = np.array([indices_flat], dtype=int)
else:
self._indices_flat = indices_flat
self.__variant = str(variant).lower()
if self.variant not in ('dirac', 'char_fun'):
raise ValueError('`variant` {!r} not understood'.format(variant))
domain = tensor_space(self.sampling_points[0].size, dtype=range.dtype)
super(WeightedSumSamplingOperator, self).__init__(
domain, range, linear=True)
@property
def variant(self):
"""Weighting scheme for the operator."""
return self.__variant
@property
def sampling_points(self):
"""Indices where to sample the function."""
return self.__sampling_points
def _call(self, x):
"""Sum all values if indices are given multiple times."""
y = np.bincount(self._indices_flat, weights=x,
minlength=self.range.size)
out = y.reshape(self.range.shape)
if self.variant == 'dirac':
weights = getattr(self.range, 'cell_volume', 1.0)
elif self.variant == 'char_fun':
weights = 1.0
else:
raise RuntimeError('The variant "{!r}" is not yet supported'
''.format(self.variant))
if weights != 1.0:
out /= weights
return out
@property
def adjoint(self):
"""Adjoint of this operator, a `SamplingOperator`.
The ``'char_fun'`` variant of this operator corresponds to the
``'integrate'`` sampling operator, and ``'dirac'`` corresponds to
``'point_eval'``.
Examples
--------
>>> space = odl.uniform_discr([-1, -1], [1, 1], shape=(2, 3))
>>> # Point (0, 0) occurs twice
>>> sampling_points = [[0, 1, 1, 0],
... [0, 1, 2, 0]]
>>> op = odl.WeightedSumSamplingOperator(space, sampling_points,
... variant='dirac')
>>> y = op.range.element([[1, 2, 3],
... [4, 5, 6]])
>>> op.adjoint(y)
rn(4).element([ 1., 5., 6., 1.])
>>> x = op.domain.element([1, 2, 3, 4])
>>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10
True
>>> op = odl.WeightedSumSamplingOperator(space, sampling_points,
... variant='char_fun')
>>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10
True
"""
if self.variant == 'dirac':
variant = 'point_eval'
elif self.variant == 'char_fun':
variant = 'integrate'
else:
raise RuntimeError('The variant "{!r}" is not yet supported'
''.format(self.variant))
return SamplingOperator(self.range, self.sampling_points, variant)
def __repr__(self):
"""Return ``repr(self)``."""
posargs = [self.range, self.sampling_points]
optargs = [('variant', self.variant, 'char_fun')]
sig_str = signature_string(posargs, optargs, mod=['!r', ''],
sep=[',\n', '', ',\n'])
return '{}(\n{}\n)'.format(self.__class__.__name__, indent(sig_str))
def __str__(self):
"""Return ``str(self)``."""
return repr(self)
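# Hedged NumPy sketch of the duplicate-summing insertion done in
# WeightedSumSamplingOperator._call via `np.bincount`:
#
#     g = np.array([1.0, 0.5, 0.25])            # values to insert
#     flat = np.array([1, 2, 1])                # flat indices, index 1 repeats
#     out = np.bincount(flat, weights=g, minlength=4)
#     # out == array([ 0.  ,  1.25,  0.5 ,  0.  ]), as in the 1D doctest above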
class FlatteningOperator(Operator):
"""Operator that reshapes the object as a column vector.
The operation performed by this operator is ::
FlatteningOperator(x) == ravel(x)
    The range of this operator is always a `TensorSpace`, even if
    the domain is a discrete function space.
"""
def __init__(self, domain, order='C'):
"""Initialize a new instance.
Parameters
----------
domain : `TensorSpace`
Set of elements on which this operator acts.
order : {'C', 'F'}, optional
If provided, flattening is performed in this order. ``'C'``
            means that the last index changes fastest, while in
``'F'`` ordering, the first index changes fastest.
Examples
--------
>>> space = odl.uniform_discr([-1, -1], [1, 1], shape=(2, 3))
>>> op = odl.FlatteningOperator(space)
>>> op.range
rn(6)
>>> x = space.element([[1, 2, 3],
... [4, 5, 6]])
>>> op(x)
rn(6).element([ 1., 2., 3., 4., 5., 6.])
>>> op = odl.FlatteningOperator(space, order='F')
>>> op(x)
rn(6).element([ 1., 4., 2., 5., 3., 6.])
"""
if not isinstance(domain, TensorSpace):
raise TypeError('`domain` must be a `TensorSpace` instance, got '
'{!r}'.format(domain))
self.__order = str(order).upper()
if self.order not in ('C', 'F'):
raise ValueError('`order` {!r} not understood'.format(order))
range = tensor_space(domain.size, dtype=domain.dtype)
super(FlatteningOperator, self).__init__(domain, range, linear=True)
def _call(self, x):
"""Flatten ``x``."""
return np.ravel(x, order=self.order)
@property
def order(self):
"""order of the flattening operation."""
return self.__order
@property
def adjoint(self):
"""Adjoint of the flattening, a scaled version of the `inverse`.
Examples
--------
>>> space = odl.uniform_discr([-1, -1], [1, 1], shape=(2, 4))
>>> op = odl.FlatteningOperator(space)
>>> y = op.range.element([1, 2, 3, 4, 5, 6, 7, 8])
>>> 1 / space.cell_volume # the scaling factor
2.0
>>> op.adjoint(y)
uniform_discr([-1., -1.], [ 1., 1.], (2, 4)).element(
[[ 2., 4., 6., 8.],
[ 10., 12., 14., 16.]]
)
>>> x = space.element([[1, 2, 3, 4],
... [5, 6, 7, 8]])
>>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10
True
"""
scaling = getattr(self.domain, 'cell_volume', 1.0)
return 1 / scaling * self.inverse
@property
def inverse(self):
"""Operator that reshapes to original shape.
Examples
--------
>>> space = odl.uniform_discr([-1, -1], [1, 1], shape=(2, 4))
>>> op = odl.FlatteningOperator(space)
>>> y = op.range.element([1, 2, 3, 4, 5, 6, 7, 8])
>>> op.inverse(y)
uniform_discr([-1., -1.], [ 1., 1.], (2, 4)).element(
[[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.]]
)
>>> op = odl.FlatteningOperator(space, order='F')
>>> op.inverse(y)
uniform_discr([-1., -1.], [ 1., 1.], (2, 4)).element(
[[ 1., 3., 5., 7.],
[ 2., 4., 6., 8.]]
)
>>> op(op.inverse(y)) == y
True
"""
op = self
scaling = getattr(self.domain, 'cell_volume', 1.0)
class FlatteningOperatorInverse(Operator):
"""Inverse of `FlatteningOperator`.
This operator reshapes a flat vector back to original shape::
FlatteningOperatorInverse(x) == reshape(x, orig_shape)
"""
def __init__(self):
"""Initialize a new instance."""
super(FlatteningOperatorInverse, self).__init__(
op.range, op.domain, linear=True)
def _call(self, x):
"""Reshape ``x`` back to n-dim. shape."""
return np.reshape(x.asarray(), self.range.shape,
order=op.order)
@property
def adjoint(self):
"""Adjoint of this operator, a scaled `FlatteningOperator`."""
return scaling * op
@property
def inverse(self):
"""Inverse of this operator."""
return op
def __repr__(self):
"""Return ``repr(self)``."""
return '{!r}.inverse'.format(op)
def __str__(self):
"""Return ``str(self)``."""
return repr(self)
return FlatteningOperatorInverse()
def __repr__(self):
"""Return ``repr(self)``."""
posargs = [self.domain]
optargs = [('order', self.order, 'C')]
sig_str = signature_string(posargs, optargs, mod=['!r', ''],
sep=['', '', ',\n'])
return '{}(\n{}\n)'.format(self.__class__.__name__, indent(sig_str))
def __str__(self):
"""Return ``str(self)``."""
return repr(self)
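# Hedged NumPy sketch of the flatten / un-flatten pair and the adjoint scaling
# by 1 / cell_volume described in the `adjoint` docstring above:
#
#     x = np.array([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]])
#     y = np.ravel(x, order='C')                      # forward operator
#     x_back = np.reshape(y, x.shape, order='C')      # inverse
#     adj = (1 / 0.5) * x_back                        # 0.5 = cell volume above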
def is_compatible_space(space, base_space):
"""Check compatibility of a (power) space with a base space.
Compatibility here means that the spaces are equal or ``space``
is a non-empty power space of ``base_space`` up to different
data types.
Parameters
----------
space, base_space : `LinearSpace`
Spaces to check for compatibility. ``base_space`` cannot be a
`ProductSpace`.
Returns
-------
is_compatible : bool
``True`` if
- ``space == base_space`` or
- ``space.astype(base_space.dtype) == base_space``, provided that
these properties exist, or
- ``space`` is a power space of nonzero length and one of the three
situations applies to ``space[0]`` (recursively).
Otherwise ``False``.
Examples
--------
Scalar spaces:
>>> base = odl.rn(2)
>>> is_compatible_space(odl.rn(2), base)
True
>>> is_compatible_space(odl.rn(3), base)
False
>>> is_compatible_space(odl.rn(2, dtype='float32'), base)
True
Power spaces:
>>> is_compatible_space(odl.rn(2) ** 2, base)
True
>>> is_compatible_space(odl.rn(2) * odl.rn(3), base) # no power space
False
>>> is_compatible_space(odl.rn(2, dtype='float32') ** 2, base)
True
"""
if isinstance(base_space, ProductSpace):
return False
if isinstance(space, ProductSpace):
if not space.is_power_space:
return False
elif len(space) == 0:
return False
else:
return is_compatible_space(space[0], base_space)
else:
if hasattr(space, 'astype') and hasattr(base_space, 'dtype'):
# TODO: maybe only the shape should play a role?
comp_space = space.astype(base_space.dtype)
else:
comp_space = space
return comp_space == base_space
if __name__ == '__main__':
from odl.util.testutils import run_doctests
run_doctests()
|
mpl-2.0
|
058d564832f96c5ecd8629a8d858378d
| 36.509792
| 79
| 0.53879
| 4.098636
| false
| false
| false
| false
|
odlgroup/odl
|
examples/solvers/pdhg_denoising_L2_HuberTV.py
|
2
|
4708
|
"""Linearly convergent total variation denoising using PDHG.
This exhaustive example solves the L2-HuberTV problem
min_{x >= 0} 1/2 ||x - d||_2^2
+ lam * sum_i eta_gamma(||grad(x)_i||_2)
where ``grad`` is the spatial gradient and ``d`` is given noisy data. Here
``eta_gamma`` denotes the Huber function. For more details, see the Huber
documentation.
We compare two different step size rules as described in
Chambolle, A., & Pock, T. (2011). *A First-Order Primal-Dual Algorithm for
Convex Problems with Applications to Imaging*. Journal of Mathematical Imaging
and Vision, 40(1), 120–145. http://doi.org/10.1007/s10851-010-0251-1
Chambolle, A., Ehrhardt, M. J., Richtárik, P., & Schönlieb, C.-B. (2017).
*Stochastic Primal-Dual Hybrid Gradient Algorithm with Arbitrary Sampling and
Imaging Applications*. Retrieved from http://arxiv.org/abs/1706.04957
and show their convergence rates.
For further details and a description of the solution method used, see
https://odlgroup.github.io/odl/guide/pdhg_guide.html in the ODL documentation.
"""
import numpy as np
import scipy.misc
import odl
import matplotlib.pyplot as plt
# Define ground truth, space and noisy data
image = np.rot90(scipy.misc.ascent()[::2, ::2].astype('float'), 3)
shape = image.shape
image /= image.max()
space = odl.uniform_discr([0, 0], shape, shape)
orig = space.element(image.copy())
d = odl.phantom.white_noise(space, orig, 0.1)
# Define objective functional
op = odl.Gradient(space) # operator
norm_op = np.sqrt(8) + 1e-4 # norm with forward differences is well-known
lam = 0.1 # Regularization parameter
f = 1 / (2 * lam) * odl.solvers.L2NormSquared(space).translated(d) # data fit
g = odl.solvers.Huber(op.range, gamma=.01) # regularization
obj_fun = f + g * op # combined functional
mu_g = 1 / lam # strong convexity of "g"
mu_f = 1 / f.grad_lipschitz # strong convexity of "f*"
# Define algorithm parameters
class CallbackStore(odl.solvers.Callback): # Callback to store function values
def __init__(self):
self.iteration_count = 0
self.iteration_counts = []
self.obj_function_values = []
def __call__(self, x):
self.iteration_count += 1
self.iteration_counts.append(self.iteration_count)
self.obj_function_values.append(obj_fun(x))
def reset(self):
self.iteration_count = 0
self.iteration_counts = []
self.obj_function_values = []
callback = odl.solvers.CallbackPrintIteration(step=10) & CallbackStore()
niter = 200 # number of iterations
# Parameters for algorithm 1
# Related to the root of the problem condition number
kappa1 = np.sqrt(1 + 0.999 * norm_op ** 2 / (mu_g * mu_f))
tau1 = 1 / (mu_g * (kappa1 - 1)) # Primal step size
sigma1 = 1 / (mu_f * (kappa1 - 1)) # Dual step size
theta1 = 1 - 2 / (1 + kappa1) # Extrapolation constant
# Parameters for algorithm 2
# Square root of the problem condition number
kappa2 = norm_op / np.sqrt(mu_f * mu_g)
tau2 = 1 / norm_op * np.sqrt(mu_f / mu_g) # Primal step size
sigma2 = 1 / norm_op * np.sqrt(mu_g / mu_f) # Dual step size
theta2 = 1 - 2 / (2 + kappa2) # Extrapolation constant
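# Hedged note (not taken verbatim from the references): for algorithm 2 the
# step sizes satisfy tau2 * sigma2 == 1 / norm_op ** 2, i.e. the classical
# PDHG step size condition tau * sigma * ||op||^2 <= 1 holds with equality.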
# Run linearly convergent algorithm 1
x1 = space.zero()
callback(x1) # store values for initialization
odl.solvers.pdhg(x1, f, g, op, niter, tau1, sigma1, theta=theta1,
callback=callback)
obj1 = callback.callbacks[1].obj_function_values
# Run linearly convergent algorithm 2
callback.reset()
x2 = space.zero()
callback(x2) # store values for initialization
odl.solvers.pdhg(x2, f, g, op, niter, tau2, sigma2, theta=theta2,
callback=callback)
obj2 = callback.callbacks[1].obj_function_values
# %% Display results
# Show images
clim = [0, 1]
cmap = 'gray'
orig.show('Original', clim=clim, cmap=cmap)
d.show('Noisy', clim=clim, cmap=cmap)
x1.show('Denoised, Algo 1', clim=clim, cmap=cmap)
x2.show('Denoised, Algo 2', clim=clim, cmap=cmap)
# Show convergence rate
min_obj = min(obj1 + obj2)
def rel_fun(x):
x = np.array(x)
return (x - min_obj) / (x[0] - min_obj)
iters = np.array(callback.callbacks[1].iteration_counts)
plt.figure()
plt.semilogy(iters, rel_fun(obj1), color='red',
label='Algo 1, Chambolle et al 2017')
plt.semilogy(iters, rel_fun(obj2), color='blue',
label='Algo 2, Chambolle and Pock 2011')
rho = theta1
plt.semilogy(iters[1:], rho ** iters[1:], '--', color='red',
label=r'$O(\rho_1^k), \rho_1={:3.2f}$'.format(rho))
rho = theta2
plt.semilogy(iters[1:], rho ** iters[1:], '--', color='blue',
label=r'$O(\rho_2^k), \rho_2={:3.2f}$'.format(rho))
plt.title('Function Values + Theoretical Upper Bounds')
plt.ylim((1e-16, 1))
plt.legend()
|
mpl-2.0
|
17531f7ccca6f7c320e5842ec6bc7387
| 33.086957
| 79
| 0.673895
| 2.894769
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/filmon.py
|
63
|
6049
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_HTTPError,
)
from ..utils import (
qualities,
strip_or_none,
int_or_none,
ExtractorError,
)
class FilmOnIE(InfoExtractor):
IE_NAME = 'filmon'
_VALID_URL = r'(?:https?://(?:www\.)?filmon\.com/vod/view/|filmon:)(?P<id>\d+)'
_TESTS = [{
'url': 'https://www.filmon.com/vod/view/24869-0-plan-9-from-outer-space',
'info_dict': {
'id': '24869',
'ext': 'mp4',
'title': 'Plan 9 From Outer Space',
'description': 'Dead human, zombies and vampires',
},
}, {
'url': 'https://www.filmon.com/vod/view/2825-1-popeye-series-1',
'info_dict': {
'id': '2825',
'title': 'Popeye Series 1',
'description': 'The original series of Popeye.',
},
'playlist_mincount': 8,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
try:
response = self._download_json(
'https://www.filmon.com/api/vod/movie?id=%s' % video_id,
video_id)['response']
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError):
errmsg = self._parse_json(e.cause.read().decode(), video_id)['reason']
raise ExtractorError('%s said: %s' % (self.IE_NAME, errmsg), expected=True)
raise
title = response['title']
description = strip_or_none(response.get('description'))
if response.get('type_id') == 1:
entries = [self.url_result('filmon:' + episode_id) for episode_id in response.get('episodes', [])]
return self.playlist_result(entries, video_id, title, description)
QUALITY = qualities(('low', 'high'))
formats = []
for format_id, stream in response.get('streams', {}).items():
stream_url = stream.get('url')
if not stream_url:
continue
formats.append({
'format_id': format_id,
'url': stream_url,
'ext': 'mp4',
'quality': QUALITY(stream.get('quality')),
'protocol': 'm3u8_native',
})
self._sort_formats(formats)
thumbnails = []
poster = response.get('poster', {})
thumbs = poster.get('thumbs', {})
thumbs['poster'] = poster
for thumb_id, thumb in thumbs.items():
thumb_url = thumb.get('url')
if not thumb_url:
continue
thumbnails.append({
'id': thumb_id,
'url': thumb_url,
'width': int_or_none(thumb.get('width')),
'height': int_or_none(thumb.get('height')),
})
return {
'id': video_id,
'title': title,
'formats': formats,
'description': description,
'thumbnails': thumbnails,
}
class FilmOnChannelIE(InfoExtractor):
IE_NAME = 'filmon:channel'
_VALID_URL = r'https?://(?:www\.)?filmon\.com/(?:tv|channel)/(?P<id>[a-z0-9-]+)'
_TESTS = [{
# VOD
'url': 'http://www.filmon.com/tv/sports-haters',
'info_dict': {
'id': '4190',
'ext': 'mp4',
'title': 'Sports Haters',
'description': 'md5:dabcb4c1d9cfc77085612f1a85f8275d',
},
}, {
# LIVE
'url': 'https://www.filmon.com/channel/filmon-sports',
'only_matching': True,
}, {
'url': 'https://www.filmon.com/tv/2894',
'only_matching': True,
}]
_THUMBNAIL_RES = [
('logo', 56, 28),
('big_logo', 106, 106),
('extra_big_logo', 300, 300),
]
def _real_extract(self, url):
channel_id = self._match_id(url)
try:
channel_data = self._download_json(
'http://www.filmon.com/api-v2/channel/' + channel_id, channel_id)['data']
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError):
errmsg = self._parse_json(e.cause.read().decode(), channel_id)['message']
raise ExtractorError('%s said: %s' % (self.IE_NAME, errmsg), expected=True)
raise
channel_id = compat_str(channel_data['id'])
is_live = not channel_data.get('is_vod') and not channel_data.get('is_vox')
title = channel_data['title']
QUALITY = qualities(('low', 'high'))
formats = []
for stream in channel_data.get('streams', []):
stream_url = stream.get('url')
if not stream_url:
continue
if not is_live:
formats.extend(self._extract_wowza_formats(
stream_url, channel_id, skip_protocols=['dash', 'rtmp', 'rtsp']))
continue
quality = stream.get('quality')
formats.append({
'format_id': quality,
# this is an m3u8 stream, but we are deliberately not using _extract_m3u8_formats
# because it doesn't have bitrate variants anyway
'url': stream_url,
'ext': 'mp4',
'quality': QUALITY(quality),
})
self._sort_formats(formats)
thumbnails = []
for name, width, height in self._THUMBNAIL_RES:
thumbnails.append({
'id': name,
'url': 'http://static.filmon.com/assets/channels/%s/%s.png' % (channel_id, name),
'width': width,
'height': height,
})
return {
'id': channel_id,
'display_id': channel_data.get('alias'),
'title': self._live_title(title) if is_live else title,
'description': channel_data.get('description'),
'thumbnails': thumbnails,
'formats': formats,
'is_live': is_live,
}
|
unlicense
|
7f681034b497a9220b27f6a72e04c59e
| 32.983146
| 110
| 0.503554
| 3.731647
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/nonktube.py
|
15
|
1151
|
from __future__ import unicode_literals
from .nuevo import NuevoBaseIE
class NonkTubeIE(NuevoBaseIE):
_VALID_URL = r'https?://(?:www\.)?nonktube\.com/(?:(?:video|embed)/|media/nuevo/embed\.php\?.*?\bid=)(?P<id>\d+)'
_TESTS = [{
'url': 'https://www.nonktube.com/video/118636/sensual-wife-uncensored-fucked-in-hairy-pussy-and-facialized',
'info_dict': {
'id': '118636',
'ext': 'mp4',
'title': 'Sensual Wife Uncensored Fucked In Hairy Pussy And Facialized',
'age_limit': 18,
'duration': 1150.98,
},
'params': {
'skip_download': True,
}
}, {
'url': 'https://www.nonktube.com/embed/118636',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._og_search_title(webpage)
info = self._parse_html5_media_entries(url, webpage, video_id)[0]
info.update({
'id': video_id,
'title': title,
'age_limit': 18,
})
return info
|
unlicense
|
b51e39cc51665e3a124b84b132572f4b
| 29.289474
| 117
| 0.533449
| 3.153425
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/stv.py
|
5
|
3447
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
compat_str,
float_or_none,
int_or_none,
smuggle_url,
str_or_none,
try_get,
)
class STVPlayerIE(InfoExtractor):
IE_NAME = 'stv:player'
_VALID_URL = r'https?://player\.stv\.tv/(?P<type>episode|video)/(?P<id>[a-z0-9]{4})'
_TESTS = [{
# shortform
'url': 'https://player.stv.tv/video/4gwd/emmerdale/60-seconds-on-set-with-laura-norton/',
'md5': '5adf9439c31d554f8be0707c7abe7e0a',
'info_dict': {
'id': '5333973339001',
'ext': 'mp4',
'upload_date': '20170301',
'title': '60 seconds on set with Laura Norton',
'description': "How many questions can Laura - a.k.a Kerry Wyatt - answer in 60 seconds? Let\'s find out!",
'timestamp': 1488388054,
'uploader_id': '1486976045',
},
'skip': 'this resource is unavailable outside of the UK',
}, {
# episodes
'url': 'https://player.stv.tv/episode/4125/jennifer-saunders-memory-lane',
'only_matching': True,
}]
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1486976045/default_default/index.html?videoId=%s'
_PTYPE_MAP = {
'episode': 'episodes',
'video': 'shortform',
}
def _real_extract(self, url):
ptype, video_id = re.match(self._VALID_URL, url).groups()
webpage = self._download_webpage(url, video_id, fatal=False) or ''
props = (self._parse_json(self._search_regex(
r'<script[^>]+id="__NEXT_DATA__"[^>]*>({.+?})</script>',
webpage, 'next data', default='{}'), video_id,
fatal=False) or {}).get('props') or {}
player_api_cache = try_get(
props, lambda x: x['initialReduxState']['playerApiCache']) or {}
api_path, resp = None, {}
for k, v in player_api_cache.items():
if k.startswith('/episodes/') or k.startswith('/shortform/'):
api_path, resp = k, v
break
else:
episode_id = str_or_none(try_get(
props, lambda x: x['pageProps']['episodeId']))
api_path = '/%s/%s' % (self._PTYPE_MAP[ptype], episode_id or video_id)
result = resp.get('results')
if not result:
resp = self._download_json(
'https://player.api.stv.tv/v1' + api_path, video_id)
result = resp['results']
video = result['video']
video_id = compat_str(video['id'])
subtitles = {}
_subtitles = result.get('_subtitles') or {}
for ext, sub_url in _subtitles.items():
subtitles.setdefault('en', []).append({
'ext': 'vtt' if ext == 'webvtt' else ext,
'url': sub_url,
})
programme = result.get('programme') or {}
return {
'_type': 'url_transparent',
'id': video_id,
'url': smuggle_url(self.BRIGHTCOVE_URL_TEMPLATE % video_id, {'geo_countries': ['GB']}),
'description': result.get('summary'),
'duration': float_or_none(video.get('length'), 1000),
'subtitles': subtitles,
'view_count': int_or_none(result.get('views')),
'series': programme.get('name') or programme.get('shortName'),
'ie_key': 'BrightcoveNew',
}
|
unlicense
|
2c0e56adc6ddf42013a0aedd9954746a
| 35.284211
| 119
| 0.536989
| 3.433267
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/puhutv.py
|
11
|
8458
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
)
from ..utils import (
ExtractorError,
int_or_none,
float_or_none,
parse_resolution,
str_or_none,
try_get,
unified_timestamp,
url_or_none,
urljoin,
)
class PuhuTVIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?puhutv\.com/(?P<id>[^/?#&]+)-izle'
IE_NAME = 'puhutv'
_TESTS = [{
# film
'url': 'https://puhutv.com/sut-kardesler-izle',
'md5': 'a347470371d56e1585d1b2c8dab01c96',
'info_dict': {
'id': '5085',
'display_id': 'sut-kardesler',
'ext': 'mp4',
'title': 'Süt Kardeşler',
'description': 'md5:ca09da25b7e57cbb5a9280d6e48d17aa',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 4832.44,
'creator': 'Arzu Film',
'timestamp': 1561062602,
'upload_date': '20190620',
'release_year': 1976,
'view_count': int,
'tags': list,
},
}, {
# episode, geo restricted, bypassable with --geo-verification-proxy
'url': 'https://puhutv.com/jet-sosyete-1-bolum-izle',
'only_matching': True,
}, {
# 4k, with subtitles
'url': 'https://puhutv.com/dip-1-bolum-izle',
'only_matching': True,
}]
_SUBTITLE_LANGS = {
'English': 'en',
'Deutsch': 'de',
'عربى': 'ar'
}
def _real_extract(self, url):
display_id = self._match_id(url)
info = self._download_json(
urljoin(url, '/api/slug/%s-izle' % display_id),
display_id)['data']
video_id = compat_str(info['id'])
show = info.get('title') or {}
title = info.get('name') or show['name']
if info.get('display_name'):
title = '%s %s' % (title, info['display_name'])
try:
videos = self._download_json(
'https://puhutv.com/api/assets/%s/videos' % video_id,
display_id, 'Downloading video JSON',
headers=self.geo_verification_headers())
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
self.raise_geo_restricted()
raise
urls = []
formats = []
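        # master playlists are expanded via the HLS parser; single renditions (HLS or MP4) are added individually, keyed by quality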
for video in videos['data']['videos']:
media_url = url_or_none(video.get('url'))
if not media_url or media_url in urls:
continue
urls.append(media_url)
playlist = video.get('is_playlist')
if (video.get('stream_type') == 'hls' and playlist is True) or 'playlist.m3u8' in media_url:
formats.extend(self._extract_m3u8_formats(
media_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
continue
quality = int_or_none(video.get('quality'))
f = {
'url': media_url,
'ext': 'mp4',
'height': quality
}
video_format = video.get('video_format')
is_hls = (video_format == 'hls' or '/hls/' in media_url or '/chunklist.m3u8' in media_url) and playlist is False
if is_hls:
format_id = 'hls'
f['protocol'] = 'm3u8_native'
elif video_format == 'mp4':
format_id = 'http'
else:
continue
if quality:
format_id += '-%sp' % quality
f['format_id'] = format_id
formats.append(f)
self._sort_formats(formats)
creator = try_get(
show, lambda x: x['producer']['name'], compat_str)
content = info.get('content') or {}
images = try_get(
content, lambda x: x['images']['wide'], dict) or {}
thumbnails = []
for image_id, image_url in images.items():
if not isinstance(image_url, compat_str):
continue
if not image_url.startswith(('http', '//')):
image_url = 'https://%s' % image_url
t = parse_resolution(image_id)
t.update({
'id': image_id,
'url': image_url
})
thumbnails.append(t)
tags = []
for genre in show.get('genres') or []:
if not isinstance(genre, dict):
continue
genre_name = genre.get('name')
if genre_name and isinstance(genre_name, compat_str):
tags.append(genre_name)
subtitles = {}
for subtitle in content.get('subtitles') or []:
if not isinstance(subtitle, dict):
continue
lang = subtitle.get('language')
sub_url = url_or_none(subtitle.get('url') or subtitle.get('file'))
if not lang or not isinstance(lang, compat_str) or not sub_url:
continue
subtitles[self._SUBTITLE_LANGS.get(lang, lang)] = [{
'url': sub_url
}]
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': info.get('description') or show.get('description'),
'season_id': str_or_none(info.get('season_id')),
'season_number': int_or_none(info.get('season_number')),
'episode_number': int_or_none(info.get('episode_number')),
'release_year': int_or_none(show.get('released_at')),
'timestamp': unified_timestamp(info.get('created_at')),
'creator': creator,
'view_count': int_or_none(content.get('watch_count')),
'duration': float_or_none(content.get('duration_in_ms'), 1000),
'tags': tags,
'subtitles': subtitles,
'thumbnails': thumbnails,
'formats': formats
}
class PuhuTVSerieIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?puhutv\.com/(?P<id>[^/?#&]+)-detay'
IE_NAME = 'puhutv:serie'
_TESTS = [{
'url': 'https://puhutv.com/deniz-yildizi-detay',
'info_dict': {
'title': 'Deniz Yıldızı',
'id': 'deniz-yildizi',
},
'playlist_mincount': 205,
}, {
# a film detail page which is using same url with serie page
'url': 'https://puhutv.com/kaybedenler-kulubu-detay',
'only_matching': True,
}]
def _extract_entries(self, seasons):
for season in seasons:
season_id = season.get('id')
if not season_id:
continue
page = 1
has_more = True
while has_more is True:
season = self._download_json(
'https://galadriel.puhutv.com/seasons/%s' % season_id,
season_id, 'Downloading page %s' % page, query={
'page': page,
'per': 40,
})
episodes = season.get('episodes')
if isinstance(episodes, list):
for ep in episodes:
slug_path = str_or_none(ep.get('slugPath'))
if not slug_path:
continue
video_id = str_or_none(int_or_none(ep.get('id')))
yield self.url_result(
'https://puhutv.com/%s' % slug_path,
ie=PuhuTVIE.ie_key(), video_id=video_id,
video_title=ep.get('name') or ep.get('eventLabel'))
page += 1
has_more = season.get('hasMore')
def _real_extract(self, url):
playlist_id = self._match_id(url)
info = self._download_json(
urljoin(url, '/api/slug/%s-detay' % playlist_id),
playlist_id)['data']
seasons = info.get('seasons')
if seasons:
return self.playlist_result(
self._extract_entries(seasons), playlist_id, info.get('name'))
# For films, these are using same url with series
video_id = info.get('slug') or info['assets'][0]['slug']
return self.url_result(
'https://puhutv.com/%s-izle' % video_id,
PuhuTVIE.ie_key(), video_id)
|
unlicense
|
1da1150b841fc2cd453280ca1e837632
| 34.351464
| 124
| 0.492839
| 3.708955
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/foxgay.py
|
50
|
2203
|
from __future__ import unicode_literals
import itertools
from .common import InfoExtractor
from ..utils import (
get_element_by_id,
int_or_none,
remove_end,
)
class FoxgayIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?foxgay\.com/videos/(?:\S+-)?(?P<id>\d+)\.shtml'
_TEST = {
'url': 'http://foxgay.com/videos/fuck-turkish-style-2582.shtml',
'md5': '344558ccfea74d33b7adbce22e577f54',
'info_dict': {
'id': '2582',
'ext': 'mp4',
'title': 'Fuck Turkish-style',
'description': 'md5:6ae2d9486921891efe89231ace13ffdf',
'age_limit': 18,
'thumbnail': r're:https?://.*\.jpg$',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = remove_end(self._html_search_regex(
r'<title>([^<]+)</title>', webpage, 'title'), ' - Foxgay.com')
description = get_element_by_id('inf_tit', webpage)
# The default user-agent with foxgay cookies leads to pages without videos
self._downloader.cookiejar.clear('.foxgay.com')
# Find the URL for the iFrame which contains the actual video.
iframe_url = self._html_search_regex(
r'<iframe[^>]+src=([\'"])(?P<url>[^\'"]+)\1', webpage,
'video frame', group='url')
iframe = self._download_webpage(
iframe_url, video_id, headers={'User-Agent': 'curl/7.50.1'},
note='Downloading video frame')
video_data = self._parse_json(self._search_regex(
r'video_data\s*=\s*([^;]+);', iframe, 'video data'), video_id)
formats = [{
'url': source,
'height': int_or_none(resolution),
} for source, resolution in zip(
video_data['sources'], video_data.get('resolutions', itertools.repeat(None)))]
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'formats': formats,
'description': description,
'thumbnail': video_data.get('act_vid', {}).get('thumb'),
'age_limit': 18,
}
|
unlicense
|
27caae54b91322db398d457b53f38751
| 33.968254
| 90
| 0.545166
| 3.485759
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/twitter.py
|
1
|
27930
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_parse_qs,
compat_urllib_parse_unquote,
compat_urllib_parse_urlparse,
)
from ..utils import (
dict_get,
ExtractorError,
float_or_none,
int_or_none,
try_get,
strip_or_none,
unified_timestamp,
update_url_query,
xpath_text,
)
from .periscope import (
PeriscopeBaseIE,
PeriscopeIE,
)
class TwitterBaseIE(InfoExtractor):
_API_BASE = 'https://api.twitter.com/1.1/'
_BASE_REGEX = r'https?://(?:(?:www|m(?:obile)?)\.)?twitter\.com/'
_GUEST_TOKEN = None
def _extract_variant_formats(self, variant, video_id):
variant_url = variant.get('url')
if not variant_url:
return []
elif '.m3u8' in variant_url:
return self._extract_m3u8_formats(
variant_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False)
else:
tbr = int_or_none(dict_get(variant, ('bitrate', 'bit_rate')), 1000) or None
f = {
'url': variant_url,
'format_id': 'http' + ('-%d' % tbr if tbr else ''),
'tbr': tbr,
}
self._search_dimensions_in_video_url(f, variant_url)
return [f]
def _extract_formats_from_vmap_url(self, vmap_url, video_id):
vmap_data = self._download_xml(vmap_url, video_id)
formats = []
urls = []
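        # the VMAP document lists one videoVariant per rendition; URLs are percent-encoded and may duplicate the top-level MediaFile entry, which is only added if not already seen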
for video_variant in vmap_data.findall('.//{http://twitter.com/schema/videoVMapV2.xsd}videoVariant'):
video_variant.attrib['url'] = compat_urllib_parse_unquote(
video_variant.attrib['url'])
urls.append(video_variant.attrib['url'])
formats.extend(self._extract_variant_formats(
video_variant.attrib, video_id))
video_url = strip_or_none(xpath_text(vmap_data, './/MediaFile'))
if video_url not in urls:
formats.extend(self._extract_variant_formats({'url': video_url}, video_id))
return formats
@staticmethod
def _search_dimensions_in_video_url(a_format, video_url):
m = re.search(r'/(?P<width>\d+)x(?P<height>\d+)/', video_url)
if m:
a_format.update({
'width': int(m.group('width')),
'height': int(m.group('height')),
})
def _call_api(self, path, video_id, query={}):
headers = {
'Authorization': 'Bearer AAAAAAAAAAAAAAAAAAAAAPYXBAAAAAAACLXUNDekMxqa8h%2F40K4moUkGsoc%3DTYfbDKbT3jJPCEVnMYqilB28NHfOPqkca3qaAxGfsyKCs0wRbw',
}
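        # unauthenticated requests need a guest token alongside the public bearer token; fetch it once and cache it on the class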
if not self._GUEST_TOKEN:
self._GUEST_TOKEN = self._download_json(
self._API_BASE + 'guest/activate.json', video_id,
'Downloading guest token', data=b'',
headers=headers)['guest_token']
headers['x-guest-token'] = self._GUEST_TOKEN
try:
return self._download_json(
self._API_BASE + path, video_id, headers=headers, query=query)
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
raise ExtractorError(self._parse_json(
e.cause.read().decode(),
video_id)['errors'][0]['message'], expected=True)
raise
class TwitterCardIE(InfoExtractor):
IE_NAME = 'twitter:card'
_VALID_URL = TwitterBaseIE._BASE_REGEX + r'i/(?:cards/tfw/v1|videos(?:/tweet)?)/(?P<id>\d+)'
_TESTS = [
{
'url': 'https://twitter.com/i/cards/tfw/v1/560070183650213889',
# MD5 checksums are different in different places
'info_dict': {
'id': '560070183650213889',
'ext': 'mp4',
'title': "Twitter - You can now shoot, edit and share video on Twitter. Capture life's most moving moments from your perspective.",
'description': 'md5:18d3e24bb4f6e5007487dd546e53bd96',
'uploader': 'Twitter',
'uploader_id': 'Twitter',
'thumbnail': r're:^https?://.*\.jpg',
'duration': 30.033,
'timestamp': 1422366112,
'upload_date': '20150127',
},
},
{
'url': 'https://twitter.com/i/cards/tfw/v1/623160978427936768',
'md5': '7137eca597f72b9abbe61e5ae0161399',
'info_dict': {
'id': '623160978427936768',
'ext': 'mp4',
'title': "NASA - Fly over Pluto's icy Norgay Mountains and Sputnik Plain in this @NASANewHorizons #PlutoFlyby video.",
'description': "Fly over Pluto's icy Norgay Mountains and Sputnik Plain in this @NASANewHorizons #PlutoFlyby video. https://t.co/BJYgOjSeGA",
'uploader': 'NASA',
'uploader_id': 'NASA',
'timestamp': 1437408129,
'upload_date': '20150720',
},
},
{
'url': 'https://twitter.com/i/cards/tfw/v1/654001591733886977',
'md5': 'b6d9683dd3f48e340ded81c0e917ad46',
'info_dict': {
'id': 'dq4Oj5quskI',
'ext': 'mp4',
'title': 'Ubuntu 11.10 Overview',
'description': 'md5:a831e97fa384863d6e26ce48d1c43376',
'upload_date': '20111013',
'uploader': 'OMG! UBUNTU!',
'uploader_id': 'omgubuntu',
},
'add_ie': ['Youtube'],
},
{
'url': 'https://twitter.com/i/cards/tfw/v1/665289828897005568',
'md5': '6dabeaca9e68cbb71c99c322a4b42a11',
'info_dict': {
'id': 'iBb2x00UVlv',
'ext': 'mp4',
'upload_date': '20151113',
'uploader_id': '1189339351084113920',
'uploader': 'ArsenalTerje',
'title': 'Vine by ArsenalTerje',
'timestamp': 1447451307,
},
'add_ie': ['Vine'],
}, {
'url': 'https://twitter.com/i/videos/tweet/705235433198714880',
'md5': '884812a2adc8aaf6fe52b15ccbfa3b88',
'info_dict': {
'id': '705235433198714880',
'ext': 'mp4',
'title': "Brent Yarina - Khalil Iverson's missed highlight dunk. And made highlight dunk. In one highlight.",
'description': "Khalil Iverson's missed highlight dunk. And made highlight dunk. In one highlight. https://t.co/OrxcJ28Bns",
'uploader': 'Brent Yarina',
'uploader_id': 'BTNBrentYarina',
'timestamp': 1456976204,
'upload_date': '20160303',
},
'skip': 'This content is no longer available.',
}, {
'url': 'https://twitter.com/i/videos/752274308186120192',
'only_matching': True,
},
]
def _real_extract(self, url):
status_id = self._match_id(url)
return self.url_result(
'https://twitter.com/statuses/' + status_id,
TwitterIE.ie_key(), status_id)
class TwitterIE(TwitterBaseIE):
IE_NAME = 'twitter'
_VALID_URL = TwitterBaseIE._BASE_REGEX + r'(?:(?:i/web|[^/]+)/status|statuses)/(?P<id>\d+)'
_TESTS = [{
'url': 'https://twitter.com/freethenipple/status/643211948184596480',
'info_dict': {
'id': '643211948184596480',
'ext': 'mp4',
'title': 'FREE THE NIPPLE - FTN supporters on Hollywood Blvd today!',
'thumbnail': r're:^https?://.*\.jpg',
'description': 'FTN supporters on Hollywood Blvd today! http://t.co/c7jHH749xJ',
'uploader': 'FREE THE NIPPLE',
'uploader_id': 'freethenipple',
'duration': 12.922,
'timestamp': 1442188653,
'upload_date': '20150913',
'age_limit': 18,
},
}, {
'url': 'https://twitter.com/giphz/status/657991469417025536/photo/1',
'md5': 'f36dcd5fb92bf7057f155e7d927eeb42',
'info_dict': {
'id': '657991469417025536',
'ext': 'mp4',
'title': 'Gifs - tu vai cai tu vai cai tu nao eh capaz disso tu vai cai',
'description': 'Gifs on Twitter: "tu vai cai tu vai cai tu nao eh capaz disso tu vai cai https://t.co/tM46VHFlO5"',
'thumbnail': r're:^https?://.*\.png',
'uploader': 'Gifs',
'uploader_id': 'giphz',
},
'expected_warnings': ['height', 'width'],
'skip': 'Account suspended',
}, {
'url': 'https://twitter.com/starwars/status/665052190608723968',
'info_dict': {
'id': '665052190608723968',
'ext': 'mp4',
'title': 'Star Wars - A new beginning is coming December 18. Watch the official 60 second #TV spot for #StarWars: #TheForceAwakens.',
'description': 'A new beginning is coming December 18. Watch the official 60 second #TV spot for #StarWars: #TheForceAwakens. https://t.co/OkSqT2fjWJ',
'uploader_id': 'starwars',
'uploader': 'Star Wars',
'timestamp': 1447395772,
'upload_date': '20151113',
},
}, {
'url': 'https://twitter.com/BTNBrentYarina/status/705235433198714880',
'info_dict': {
'id': '705235433198714880',
'ext': 'mp4',
'title': "Brent Yarina - Khalil Iverson's missed highlight dunk. And made highlight dunk. In one highlight.",
'description': "Khalil Iverson's missed highlight dunk. And made highlight dunk. In one highlight. https://t.co/OrxcJ28Bns",
'uploader_id': 'BTNBrentYarina',
'uploader': 'Brent Yarina',
'timestamp': 1456976204,
'upload_date': '20160303',
},
'params': {
# The same video as https://twitter.com/i/videos/tweet/705235433198714880
# Test case of TwitterCardIE
'skip_download': True,
},
}, {
'url': 'https://twitter.com/jaydingeer/status/700207533655363584',
'info_dict': {
'id': '700207533655363584',
'ext': 'mp4',
'title': 'simon vertugo - BEAT PROD: @suhmeduh #Damndaniel',
'description': 'BEAT PROD: @suhmeduh https://t.co/HBrQ4AfpvZ #Damndaniel https://t.co/byBooq2ejZ',
'thumbnail': r're:^https?://.*\.jpg',
'uploader': 'simon vertugo',
'uploader_id': 'simonvertugo',
'duration': 30.0,
'timestamp': 1455777459,
'upload_date': '20160218',
},
}, {
'url': 'https://twitter.com/Filmdrunk/status/713801302971588609',
'md5': '89a15ed345d13b86e9a5a5e051fa308a',
'info_dict': {
'id': 'MIOxnrUteUd',
'ext': 'mp4',
'title': 'Dr.Pepperの飲み方 #japanese #バカ #ドクペ #電動ガン',
'uploader': 'TAKUMA',
'uploader_id': '1004126642786242560',
'timestamp': 1402826626,
'upload_date': '20140615',
},
'add_ie': ['Vine'],
}, {
'url': 'https://twitter.com/captainamerica/status/719944021058060289',
'info_dict': {
'id': '719944021058060289',
'ext': 'mp4',
'title': 'Captain America - @King0fNerd Are you sure you made the right choice? Find out in theaters.',
'description': '@King0fNerd Are you sure you made the right choice? Find out in theaters. https://t.co/GpgYi9xMJI',
'uploader_id': 'CaptainAmerica',
'uploader': 'Captain America',
'duration': 3.17,
'timestamp': 1460483005,
'upload_date': '20160412',
},
}, {
'url': 'https://twitter.com/OPP_HSD/status/779210622571536384',
'info_dict': {
'id': '1zqKVVlkqLaKB',
'ext': 'mp4',
'title': 'Sgt Kerry Schmidt - Ontario Provincial Police - Road rage, mischief, assault, rollover and fire in one occurrence',
'upload_date': '20160923',
'uploader_id': '1PmKqpJdOJQoY',
'uploader': 'Sgt Kerry Schmidt - Ontario Provincial Police',
'timestamp': 1474613214,
},
'add_ie': ['Periscope'],
}, {
# has mp4 formats via mobile API
'url': 'https://twitter.com/news_al3alm/status/852138619213144067',
'info_dict': {
'id': '852138619213144067',
'ext': 'mp4',
'title': 'عالم الأخبار - كلمة تاريخية بجلسة الجناسي التاريخية.. النائب خالد مؤنس العتيبي للمعارضين : اتقوا الله .. الظلم ظلمات يوم القيامة',
'description': 'كلمة تاريخية بجلسة الجناسي التاريخية.. النائب خالد مؤنس العتيبي للمعارضين : اتقوا الله .. الظلم ظلمات يوم القيامة https://t.co/xg6OhpyKfN',
'uploader': 'عالم الأخبار',
'uploader_id': 'news_al3alm',
'duration': 277.4,
'timestamp': 1492000653,
'upload_date': '20170412',
},
'skip': 'Account suspended',
}, {
'url': 'https://twitter.com/i/web/status/910031516746514432',
'info_dict': {
'id': '910031516746514432',
'ext': 'mp4',
'title': 'Préfet de Guadeloupe - [Direct] #Maria Le centre se trouve actuellement au sud de Basse-Terre. Restez confinés. Réfugiez-vous dans la pièce la + sûre.',
'thumbnail': r're:^https?://.*\.jpg',
'description': '[Direct] #Maria Le centre se trouve actuellement au sud de Basse-Terre. Restez confinés. Réfugiez-vous dans la pièce la + sûre. https://t.co/mwx01Rs4lo',
'uploader': 'Préfet de Guadeloupe',
'uploader_id': 'Prefet971',
'duration': 47.48,
'timestamp': 1505803395,
'upload_date': '20170919',
},
'params': {
'skip_download': True, # requires ffmpeg
},
}, {
# card via api.twitter.com/1.1/videos/tweet/config
'url': 'https://twitter.com/LisPower1/status/1001551623938805763',
'info_dict': {
'id': '1001551623938805763',
'ext': 'mp4',
'title': 're:.*?Shep is on a roll today.*?',
'thumbnail': r're:^https?://.*\.jpg',
'description': 'md5:37b9f2ff31720cef23b2bd42ee8a0f09',
'uploader': 'Lis Power',
'uploader_id': 'LisPower1',
'duration': 111.278,
'timestamp': 1527623489,
'upload_date': '20180529',
},
'params': {
'skip_download': True, # requires ffmpeg
},
}, {
'url': 'https://twitter.com/foobar/status/1087791357756956680',
'info_dict': {
'id': '1087791357756956680',
'ext': 'mp4',
'title': 'Twitter - A new is coming. Some of you got an opt-in to try it now. Check out the emoji button, quick keyboard shortcuts, upgraded trends, advanced search, and more. Let us know your thoughts!',
'thumbnail': r're:^https?://.*\.jpg',
'description': 'md5:6dfd341a3310fb97d80d2bf7145df976',
'uploader': 'Twitter',
'uploader_id': 'Twitter',
'duration': 61.567,
'timestamp': 1548184644,
'upload_date': '20190122',
},
}, {
# not available in Periscope
'url': 'https://twitter.com/ViviEducation/status/1136534865145286656',
'info_dict': {
'id': '1vOGwqejwoWxB',
'ext': 'mp4',
'title': 'Vivi - Vivi founder @lior_rauchy announcing our new student feedback tool live at @EduTECH_AU #EduTECH2019',
'uploader': 'Vivi',
'uploader_id': '1eVjYOLGkGrQL',
},
'add_ie': ['TwitterBroadcast'],
}, {
# unified card
'url': 'https://twitter.com/BrooklynNets/status/1349794411333394432?s=20',
'info_dict': {
'id': '1349794411333394432',
'ext': 'mp4',
'title': 'md5:d1c4941658e4caaa6cb579260d85dcba',
'thumbnail': r're:^https?://.*\.jpg',
'description': 'md5:71ead15ec44cee55071547d6447c6a3e',
'uploader': 'Brooklyn Nets',
'uploader_id': 'BrooklynNets',
'duration': 324.484,
'timestamp': 1610651040,
'upload_date': '20210114',
},
'params': {
'skip_download': True,
},
}, {
# Twitch Clip Embed
'url': 'https://twitter.com/GunB1g/status/1163218564784017422',
'only_matching': True,
}, {
# promo_video_website card
'url': 'https://twitter.com/GunB1g/status/1163218564784017422',
'only_matching': True,
}, {
# promo_video_convo card
'url': 'https://twitter.com/poco_dandy/status/1047395834013384704',
'only_matching': True,
}, {
# appplayer card
'url': 'https://twitter.com/poco_dandy/status/1150646424461176832',
'only_matching': True,
}, {
# video_direct_message card
'url': 'https://twitter.com/qarev001/status/1348948114569269251',
'only_matching': True,
}, {
# poll2choice_video card
'url': 'https://twitter.com/CAF_Online/status/1349365911120195585',
'only_matching': True,
}, {
# poll3choice_video card
'url': 'https://twitter.com/SamsungMobileSA/status/1348609186725289984',
'only_matching': True,
}, {
# poll4choice_video card
'url': 'https://twitter.com/SouthamptonFC/status/1347577658079641604',
'only_matching': True,
}]
def _real_extract(self, url):
twid = self._match_id(url)
status = self._call_api(
'statuses/show/%s.json' % twid, twid, {
'cards_platform': 'Web-12',
'include_cards': 1,
'include_reply_count': 1,
'include_user_entities': 0,
'tweet_mode': 'extended',
})
title = description = status['full_text'].replace('\n', ' ')
# strip 'https -_t.co_BJYgOjSeGA' junk from filenames
title = re.sub(r'\s+(https?://[^ ]+)', '', title)
user = status.get('user') or {}
uploader = user.get('name')
if uploader:
title = '%s - %s' % (uploader, title)
uploader_id = user.get('screen_name')
tags = []
for hashtag in (try_get(status, lambda x: x['entities']['hashtags'], list) or []):
hashtag_text = hashtag.get('text')
if not hashtag_text:
continue
tags.append(hashtag_text)
info = {
'id': twid,
'title': title,
'description': description,
'uploader': uploader,
'timestamp': unified_timestamp(status.get('created_at')),
'uploader_id': uploader_id,
'uploader_url': 'https://twitter.com/' + uploader_id if uploader_id else None,
'like_count': int_or_none(status.get('favorite_count')),
'repost_count': int_or_none(status.get('retweet_count')),
'comment_count': int_or_none(status.get('reply_count')),
'age_limit': 18 if status.get('possibly_sensitive') else 0,
'tags': tags,
}
def extract_from_video_info(media):
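            # native Twitter media: gather every bitrate variant and all advertised thumbnail sizes (plus the original) from the media entity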
video_info = media.get('video_info') or {}
formats = []
for variant in video_info.get('variants', []):
formats.extend(self._extract_variant_formats(variant, twid))
self._sort_formats(formats)
thumbnails = []
media_url = media.get('media_url_https') or media.get('media_url')
if media_url:
def add_thumbnail(name, size):
thumbnails.append({
'id': name,
'url': update_url_query(media_url, {'name': name}),
'width': int_or_none(size.get('w') or size.get('width')),
'height': int_or_none(size.get('h') or size.get('height')),
})
for name, size in media.get('sizes', {}).items():
add_thumbnail(name, size)
add_thumbnail('orig', media.get('original_info') or {})
info.update({
'formats': formats,
'thumbnails': thumbnails,
'duration': float_or_none(video_info.get('duration_millis'), 1000),
})
media = try_get(status, lambda x: x['extended_entities']['media'][0])
if media and media.get('type') != 'photo':
extract_from_video_info(media)
else:
card = status.get('card')
if card:
binding_values = card['binding_values']
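                # binding values are typed wrappers such as {'type': 'STRING', 'string_value': ...}; unwrap them by their declared type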
def get_binding_value(k):
o = binding_values.get(k) or {}
return try_get(o, lambda x: x[x['type'].lower() + '_value'])
card_name = card['name'].split(':')[-1]
if card_name == 'player':
info.update({
'_type': 'url',
'url': get_binding_value('player_url'),
})
elif card_name == 'periscope_broadcast':
info.update({
'_type': 'url',
'url': get_binding_value('url') or get_binding_value('player_url'),
'ie_key': PeriscopeIE.ie_key(),
})
elif card_name == 'broadcast':
info.update({
'_type': 'url',
'url': get_binding_value('broadcast_url'),
'ie_key': TwitterBroadcastIE.ie_key(),
})
elif card_name == 'summary':
info.update({
'_type': 'url',
'url': get_binding_value('card_url'),
})
elif card_name == 'unified_card':
media_entities = self._parse_json(get_binding_value('unified_card'), twid)['media_entities']
extract_from_video_info(next(iter(media_entities.values())))
# amplify, promo_video_website, promo_video_convo, appplayer,
# video_direct_message, poll2choice_video, poll3choice_video,
# poll4choice_video, ...
else:
is_amplify = card_name == 'amplify'
vmap_url = get_binding_value('amplify_url_vmap') if is_amplify else get_binding_value('player_stream_url')
content_id = get_binding_value('%s_content_id' % (card_name if is_amplify else 'player'))
formats = self._extract_formats_from_vmap_url(vmap_url, content_id or twid)
self._sort_formats(formats)
thumbnails = []
for suffix in ('_small', '', '_large', '_x_large', '_original'):
image = get_binding_value('player_image' + suffix) or {}
image_url = image.get('url')
if not image_url or '/player-placeholder' in image_url:
continue
thumbnails.append({
'id': suffix[1:] if suffix else 'medium',
'url': image_url,
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
})
info.update({
'formats': formats,
'thumbnails': thumbnails,
'duration': int_or_none(get_binding_value(
'content_duration_seconds')),
})
else:
expanded_url = try_get(status, lambda x: x['entities']['urls'][0]['expanded_url'])
if not expanded_url:
raise ExtractorError("There's no video in this tweet.")
info.update({
'_type': 'url',
'url': expanded_url,
})
return info
class TwitterAmplifyIE(TwitterBaseIE):
IE_NAME = 'twitter:amplify'
_VALID_URL = r'https?://amp\.twimg\.com/v/(?P<id>[0-9a-f\-]{36})'
_TEST = {
'url': 'https://amp.twimg.com/v/0ba0c3c7-0af3-4c0a-bed5-7efd1ffa2951',
'md5': '7df102d0b9fd7066b86f3159f8e81bf6',
'info_dict': {
'id': '0ba0c3c7-0af3-4c0a-bed5-7efd1ffa2951',
'ext': 'mp4',
'title': 'Twitter Video',
'thumbnail': 're:^https?://.*',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
vmap_url = self._html_search_meta(
'twitter:amplify:vmap', webpage, 'vmap url')
formats = self._extract_formats_from_vmap_url(vmap_url, video_id)
thumbnails = []
thumbnail = self._html_search_meta(
'twitter:image:src', webpage, 'thumbnail', fatal=False)
def _find_dimension(target):
w = int_or_none(self._html_search_meta(
'twitter:%s:width' % target, webpage, fatal=False))
h = int_or_none(self._html_search_meta(
'twitter:%s:height' % target, webpage, fatal=False))
return w, h
if thumbnail:
thumbnail_w, thumbnail_h = _find_dimension('image')
thumbnails.append({
'url': thumbnail,
'width': thumbnail_w,
'height': thumbnail_h,
})
video_w, video_h = _find_dimension('player')
formats[0].update({
'width': video_w,
'height': video_h,
})
return {
'id': video_id,
'title': 'Twitter Video',
'formats': formats,
'thumbnails': thumbnails,
}
class TwitterBroadcastIE(TwitterBaseIE, PeriscopeBaseIE):
IE_NAME = 'twitter:broadcast'
_VALID_URL = TwitterBaseIE._BASE_REGEX + r'i/broadcasts/(?P<id>[0-9a-zA-Z]{13})'
_TEST = {
# untitled Periscope video
'url': 'https://twitter.com/i/broadcasts/1yNGaQLWpejGj',
'info_dict': {
'id': '1yNGaQLWpejGj',
'ext': 'mp4',
'title': 'Andrea May Sahouri - Periscope Broadcast',
'uploader': 'Andrea May Sahouri',
'uploader_id': '1PXEdBZWpGwKe',
},
}
def _real_extract(self, url):
broadcast_id = self._match_id(url)
broadcast = self._call_api(
'broadcasts/show.json', broadcast_id,
{'ids': broadcast_id})['broadcasts'][broadcast_id]
info = self._parse_broadcast_data(broadcast, broadcast_id)
media_key = broadcast['media_key']
source = self._call_api(
'live_video_stream/status/' + media_key, media_key)['source']
m3u8_url = source.get('noRedirectPlaybackUrl') or source['location']
if '/live_video_stream/geoblocked/' in m3u8_url:
self.raise_geo_restricted()
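        # the 'type' query parameter of the stream URL is reused as the m3u8 format id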
m3u8_id = compat_parse_qs(compat_urllib_parse_urlparse(
m3u8_url).query).get('type', [None])[0]
state, width, height = self._extract_common_format_info(broadcast)
info['formats'] = self._extract_pscp_m3u8_formats(
m3u8_url, broadcast_id, m3u8_id, state, width, height)
return info
|
unlicense
|
3568fb744b11e17a0b4f0f5b8b49e6a0
| 40.639098
| 217
| 0.519863
| 3.425708
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/einthusan.py
|
15
|
3720
|
# coding: utf-8
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..compat import (
compat_b64decode,
compat_str,
compat_urlparse,
)
from ..utils import (
extract_attributes,
ExtractorError,
get_elements_by_class,
urlencode_postdata,
)
class EinthusanIE(InfoExtractor):
_VALID_URL = r'https?://(?P<host>einthusan\.(?:tv|com|ca))/movie/watch/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://einthusan.tv/movie/watch/9097/',
'md5': 'ff0f7f2065031b8a2cf13a933731c035',
'info_dict': {
'id': '9097',
'ext': 'mp4',
'title': 'Ae Dil Hai Mushkil',
'description': 'md5:33ef934c82a671a94652a9b4e54d931b',
'thumbnail': r're:^https?://.*\.jpg$',
}
}, {
'url': 'https://einthusan.tv/movie/watch/51MZ/?lang=hindi',
'only_matching': True,
}, {
'url': 'https://einthusan.com/movie/watch/9097/',
'only_matching': True,
}, {
'url': 'https://einthusan.ca/movie/watch/4E9n/?lang=hindi',
'only_matching': True,
}]
# reversed from jsoncrypto.prototype.decrypt() in einthusan-PGMovieWatcher.js
def _decrypt(self, encrypted_data, video_id):
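        # undo the obfuscation: the final character is moved to position 10, the original characters at positions 10-11 are discarded, then the payload is base64-decoded into JSON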
return self._parse_json(compat_b64decode((
encrypted_data[:10] + encrypted_data[-1] + encrypted_data[12:-1]
)).decode('utf-8'), video_id)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
host = mobj.group('host')
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(r'<h3>([^<]+)</h3>', webpage, 'title')
player_params = extract_attributes(self._search_regex(
r'(<section[^>]+id="UIVideoPlayer"[^>]+>)', webpage, 'player parameters'))
page_id = self._html_search_regex(
'<html[^>]+data-pageid="([^"]+)"', webpage, 'page ID')
video_data = self._download_json(
'https://%s/ajax/movie/watch/%s/' % (host, video_id), video_id,
data=urlencode_postdata({
'xEvent': 'UIVideoPlayer.PingOutcome',
'xJson': json.dumps({
'EJOutcomes': player_params['data-ejpingables'],
'NativeHLS': False
}),
'arcVersion': 3,
'appVersion': 59,
'gorilla.csrf.Token': page_id,
}))['Data']
if isinstance(video_data, compat_str) and video_data.startswith('/ratelimited/'):
raise ExtractorError(
'Download rate reached. Please try again later.', expected=True)
ej_links = self._decrypt(video_data['EJLinks'], video_id)
formats = []
m3u8_url = ej_links.get('HLSLink')
if m3u8_url:
formats.extend(self._extract_m3u8_formats(
m3u8_url, video_id, ext='mp4', entry_protocol='m3u8_native'))
mp4_url = ej_links.get('MP4Link')
if mp4_url:
formats.append({
'url': mp4_url,
})
self._sort_formats(formats)
description = get_elements_by_class('synopsis', webpage)[0]
thumbnail = self._html_search_regex(
r'''<img[^>]+src=(["'])(?P<url>(?!\1).+?/moviecovers/(?!\1).+?)\1''',
webpage, 'thumbnail url', fatal=False, group='url')
if thumbnail is not None:
thumbnail = compat_urlparse.urljoin(url, thumbnail)
return {
'id': video_id,
'title': title,
'formats': formats,
'thumbnail': thumbnail,
'description': description,
}
|
unlicense
|
5d9863e4aa61949018bc7ca6038ecdf0
| 32.513514
| 92
| 0.54086
| 3.454039
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/popcorntv.py
|
20
|
2686
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
extract_attributes,
int_or_none,
unified_timestamp,
)
class PopcornTVIE(InfoExtractor):
_VALID_URL = r'https?://[^/]+\.popcorntv\.it/guarda/(?P<display_id>[^/]+)/(?P<id>\d+)'
_TESTS = [{
'url': 'https://animemanga.popcorntv.it/guarda/food-wars-battaglie-culinarie-episodio-01/9183',
'md5': '47d65a48d147caf692ab8562fe630b45',
'info_dict': {
'id': '9183',
'display_id': 'food-wars-battaglie-culinarie-episodio-01',
'ext': 'mp4',
'title': 'Food Wars, Battaglie Culinarie | Episodio 01',
'description': 'md5:b8bea378faae4651d3b34c6e112463d0',
'thumbnail': r're:^https?://.*\.jpg$',
'timestamp': 1497610857,
'upload_date': '20170616',
'duration': 1440,
'view_count': int,
},
}, {
'url': 'https://cinema.popcorntv.it/guarda/smash-cut/10433',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id, video_id = mobj.group('display_id', 'id')
webpage = self._download_webpage(url, display_id)
m3u8_url = extract_attributes(
self._search_regex(
r'(<link[^>]+itemprop=["\'](?:content|embed)Url[^>]*>)',
webpage, 'content'
))['href']
formats = self._extract_m3u8_formats(
m3u8_url, display_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls')
title = self._search_regex(
r'<h1[^>]+itemprop=["\']name[^>]*>([^<]+)', webpage,
'title', default=None) or self._og_search_title(webpage)
description = self._html_search_regex(
r'(?s)<article[^>]+itemprop=["\']description[^>]*>(.+?)</article>',
webpage, 'description', fatal=False)
thumbnail = self._og_search_thumbnail(webpage)
timestamp = unified_timestamp(self._html_search_meta(
'uploadDate', webpage, 'timestamp'))
duration = int_or_none(self._html_search_meta(
'duration', webpage), invscale=60)
view_count = int_or_none(self._html_search_meta(
'interactionCount', webpage, 'view count'))
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'duration': duration,
'view_count': view_count,
'formats': formats,
}
|
unlicense
|
8b0306635290beddb9d558acae3e4c56
| 34.342105
| 103
| 0.540953
| 3.46134
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/corus.py
|
13
|
6404
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .theplatform import ThePlatformFeedIE
from ..utils import (
dict_get,
ExtractorError,
float_or_none,
int_or_none,
)
class CorusIE(ThePlatformFeedIE):
_VALID_URL = r'''(?x)
https?://
(?:www\.)?
(?P<domain>
(?:
globaltv|
etcanada|
seriesplus|
wnetwork|
ytv
)\.com|
(?:
hgtv|
foodnetwork|
slice|
history|
showcase|
bigbrothercanada|
abcspark|
disney(?:channel|lachaine)
)\.ca
)
/(?:[^/]+/)*
(?:
video\.html\?.*?\bv=|
videos?/(?:[^/]+/)*(?:[a-z0-9-]+-)?
)
(?P<id>
[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}|
(?:[A-Z]{4})?\d{12,20}
)
'''
_TESTS = [{
'url': 'http://www.hgtv.ca/shows/bryan-inc/videos/movie-night-popcorn-with-bryan-870923331648/',
'info_dict': {
'id': '870923331648',
'ext': 'mp4',
'title': 'Movie Night Popcorn with Bryan',
'description': 'Bryan whips up homemade popcorn, the old fashion way for Jojo and Lincoln.',
'upload_date': '20170206',
'timestamp': 1486392197,
},
'params': {
'format': 'bestvideo',
'skip_download': True,
},
'expected_warnings': ['Failed to parse JSON'],
}, {
'url': 'http://www.foodnetwork.ca/shows/chopped/video/episode/chocolate-obsession/video.html?v=872683587753',
'only_matching': True,
}, {
'url': 'http://etcanada.com/video/873675331955/meet-the-survivor-game-changers-castaways-part-2/',
'only_matching': True,
}, {
'url': 'http://www.history.ca/the-world-without-canada/video/full-episodes/natural-resources/video.html?v=955054659646#video',
'only_matching': True,
}, {
'url': 'http://www.showcase.ca/eyewitness/video/eyewitness++106/video.html?v=955070531919&p=1&s=da#video',
'only_matching': True,
}, {
'url': 'http://www.bigbrothercanada.ca/video/1457812035894/',
'only_matching': True
}, {
'url': 'https://www.bigbrothercanada.ca/video/big-brother-canada-704/1457812035894/',
'only_matching': True
}, {
'url': 'https://www.seriesplus.com/emissions/dre-mary-mort-sur-ordonnance/videos/deux-coeurs-battant/SERP0055626330000200/',
'only_matching': True
}, {
'url': 'https://www.disneychannel.ca/shows/gabby-duran-the-unsittables/video/crybaby-duran-clip/2f557eec-0588-11ea-ae2b-e2c6776b770e/',
'only_matching': True
}]
_GEO_BYPASS = False
_SITE_MAP = {
'globaltv': 'series',
'etcanada': 'series',
'foodnetwork': 'food',
'bigbrothercanada': 'series',
'disneychannel': 'disneyen',
'disneylachaine': 'disneyfr',
}
def _real_extract(self, url):
domain, video_id = re.match(self._VALID_URL, url).groups()
site = domain.split('.')[0]
path = self._SITE_MAP.get(site, site)
if path != 'series':
path = 'migration/' + path
video = self._download_json(
'https://globalcontent.corusappservices.com/templates/%s/playlist/' % path,
video_id, query={'byId': video_id},
headers={'Accept': 'application/json'})[0]
title = video['title']
formats = []
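        # each source references a SMIL document; geo-blocked responses come back as JSON errors rather than XML, so both cases are handled below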
for source in video.get('sources', []):
smil_url = source.get('file')
if not smil_url:
continue
source_type = source.get('type')
note = 'Downloading%s smil file' % (' ' + source_type if source_type else '')
resp = self._download_webpage(
smil_url, video_id, note, fatal=False,
headers=self.geo_verification_headers())
if not resp:
continue
error = self._parse_json(resp, video_id, fatal=False)
if error:
if error.get('exception') == 'GeoLocationBlocked':
self.raise_geo_restricted(countries=['CA'])
raise ExtractorError(error['description'])
smil = self._parse_xml(resp, video_id, fatal=False)
if smil is None:
continue
namespace = self._parse_smil_namespace(smil)
formats.extend(self._parse_smil_formats(
smil, smil_url, video_id, namespace))
if not formats and video.get('drm'):
raise ExtractorError('This video is DRM protected.', expected=True)
self._sort_formats(formats)
subtitles = {}
for track in video.get('tracks', []):
track_url = track.get('file')
if not track_url:
continue
lang = 'fr' if site in ('disneylachaine', 'seriesplus') else 'en'
subtitles.setdefault(lang, []).append({'url': track_url})
metadata = video.get('metadata') or {}
get_number = lambda x: int_or_none(video.get('pl1$' + x) or metadata.get(x + 'Number'))
return {
'id': video_id,
'title': title,
'formats': formats,
'thumbnail': dict_get(video, ('defaultThumbnailUrl', 'thumbnail', 'image')),
'description': video.get('description'),
'timestamp': int_or_none(video.get('availableDate'), 1000),
'subtitles': subtitles,
'duration': float_or_none(metadata.get('duration')),
'series': dict_get(video, ('show', 'pl1$show')),
'season_number': get_number('season'),
'episode_number': get_number('episode'),
}
|
unlicense
|
9e53b692347f03e80ed51721b66146c9
| 39.025
| 143
| 0.482042
| 3.86715
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/meta.py
|
81
|
2623
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from .pladform import PladformIE
from ..utils import (
unescapeHTML,
int_or_none,
ExtractorError,
)
class METAIE(InfoExtractor):
_VALID_URL = r'https?://video\.meta\.ua/(?:iframe/)?(?P<id>[0-9]+)'
_TESTS = [{
'url': 'http://video.meta.ua/5502115.video',
'md5': '71b6f3ee274bef16f1ab410f7f56b476',
'info_dict': {
'id': '5502115',
'ext': 'mp4',
'title': 'Sony Xperia Z camera test [HQ]',
'description': 'Xperia Z shoots video in FullHD HDR.',
'uploader_id': 'nomobile',
'uploader': 'CHЁZA.TV',
'upload_date': '20130211',
},
'add_ie': ['Youtube'],
}, {
'url': 'http://video.meta.ua/iframe/5502115',
'only_matching': True,
}, {
# pladform embed
'url': 'http://video.meta.ua/7121015.video',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
st_html5 = self._search_regex(
r"st_html5\s*=\s*'#([^']+)'", webpage, 'uppod html5 st', default=None)
if st_html5:
# uppod st decryption algorithm is reverse engineered from function un(s) at uppod.js
json_str = ''
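            # each 3-digit group is rebuilt as an HTML numeric character reference (&#NNN;) so that unescapeHTML() below recovers the original JSON string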
for i in range(0, len(st_html5), 3):
                json_str += '&#%s;' % st_html5[i:i + 3]
uppod_data = self._parse_json(unescapeHTML(json_str), video_id)
error = uppod_data.get('customnotfound')
if error:
raise ExtractorError('%s said: %s' % (self.IE_NAME, error), expected=True)
video_url = uppod_data['file']
info = {
'id': video_id,
'url': video_url,
'title': uppod_data.get('comment') or self._og_search_title(webpage),
'description': self._og_search_description(webpage, default=None),
'thumbnail': uppod_data.get('poster') or self._og_search_thumbnail(webpage),
'duration': int_or_none(self._og_search_property(
'video:duration', webpage, default=None)),
}
if 'youtube.com/' in video_url:
info.update({
'_type': 'url_transparent',
'ie_key': 'Youtube',
})
return info
pladform_url = PladformIE._extract_url(webpage)
if pladform_url:
return self.url_result(pladform_url)
|
unlicense
|
f103de27572110d1285338173b5fb426
| 34.917808
| 97
| 0.522121
| 3.500668
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/vidbit.py
|
64
|
2917
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
int_or_none,
js_to_json,
remove_end,
unified_strdate,
)
class VidbitIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?vidbit\.co/(?:watch|embed)\?.*?\bv=(?P<id>[\da-zA-Z]+)'
_TESTS = [{
'url': 'http://www.vidbit.co/watch?v=jkL2yDOEq2',
'md5': '1a34b7f14defe3b8fafca9796892924d',
'info_dict': {
'id': 'jkL2yDOEq2',
'ext': 'mp4',
'title': 'Intro to VidBit',
'description': 'md5:5e0d6142eec00b766cbf114bfd3d16b7',
'thumbnail': r're:https?://.*\.jpg$',
'upload_date': '20160618',
'view_count': int,
'comment_count': int,
}
}, {
'url': 'http://www.vidbit.co/embed?v=jkL2yDOEq2&auto=0&water=0',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
compat_urlparse.urljoin(url, '/watch?v=%s' % video_id), video_id)
video_url, title = [None] * 2
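        # prefer the player's .setup({...}) JSON config for the stream URL and title; fall back to a bare file: regex and page markup if it is missing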
config = self._parse_json(self._search_regex(
r'(?s)\.setup\(({.+?})\);', webpage, 'setup', default='{}'),
video_id, transform_source=js_to_json)
if config:
if config.get('file'):
video_url = compat_urlparse.urljoin(url, config['file'])
title = config.get('title')
if not video_url:
video_url = compat_urlparse.urljoin(url, self._search_regex(
r'file\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1',
webpage, 'video URL', group='url'))
if not title:
title = remove_end(
self._html_search_regex(
(r'<h1>(.+?)</h1>', r'<title>(.+?)</title>'),
webpage, 'title', default=None) or self._og_search_title(webpage),
' - VidBit')
description = self._html_search_meta(
('description', 'og:description', 'twitter:description'),
webpage, 'description')
upload_date = unified_strdate(self._html_search_meta(
'datePublished', webpage, 'upload date'))
view_count = int_or_none(self._search_regex(
r'<strong>(\d+)</strong> views',
webpage, 'view count', fatal=False))
comment_count = int_or_none(self._search_regex(
r'id=["\']cmt_num["\'][^>]*>\((\d+)\)',
webpage, 'comment count', fatal=False))
return {
'id': video_id,
'url': video_url,
'title': title,
'description': description,
'thumbnail': self._og_search_thumbnail(webpage),
'upload_date': upload_date,
'view_count': view_count,
'comment_count': comment_count,
}
|
unlicense
|
c4911d7ed3ea1e0f806e02611cb6730b
| 33.72619
| 93
| 0.511484
| 3.480907
| false
| true
| false
| false
|
rbrito/pkg-youtube-dl
|
test/test_YoutubeDL.py
|
4
|
40059
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import copy
from test.helper import FakeYDL, assertRegexpMatches
from youtube_dl import YoutubeDL
from youtube_dl.compat import compat_str, compat_urllib_error
from youtube_dl.extractor import YoutubeIE
from youtube_dl.extractor.common import InfoExtractor
from youtube_dl.postprocessor.common import PostProcessor
from youtube_dl.utils import ExtractorError, match_filter_func
TEST_URL = 'http://localhost/sample.mp4'
class YDL(FakeYDL):
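    # test double that records processed info dicts and screen messages instead of downloading anything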
def __init__(self, *args, **kwargs):
super(YDL, self).__init__(*args, **kwargs)
self.downloaded_info_dicts = []
self.msgs = []
def process_info(self, info_dict):
self.downloaded_info_dicts.append(info_dict)
def to_screen(self, msg):
self.msgs.append(msg)
def _make_result(formats, **kwargs):
res = {
'formats': formats,
'id': 'testid',
'title': 'testttitle',
'extractor': 'testex',
'extractor_key': 'TestEx',
}
res.update(**kwargs)
return res
class TestFormatSelection(unittest.TestCase):
def test_prefer_free_formats(self):
# Same resolution => download webm
ydl = YDL()
ydl.params['prefer_free_formats'] = True
formats = [
{'ext': 'webm', 'height': 460, 'url': TEST_URL},
{'ext': 'mp4', 'height': 460, 'url': TEST_URL},
]
info_dict = _make_result(formats)
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['ext'], 'webm')
# Different resolution => download best quality (mp4)
ydl = YDL()
ydl.params['prefer_free_formats'] = True
formats = [
{'ext': 'webm', 'height': 720, 'url': TEST_URL},
{'ext': 'mp4', 'height': 1080, 'url': TEST_URL},
]
info_dict['formats'] = formats
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['ext'], 'mp4')
# No prefer_free_formats => prefer mp4 and flv for greater compatibility
ydl = YDL()
ydl.params['prefer_free_formats'] = False
formats = [
{'ext': 'webm', 'height': 720, 'url': TEST_URL},
{'ext': 'mp4', 'height': 720, 'url': TEST_URL},
{'ext': 'flv', 'height': 720, 'url': TEST_URL},
]
info_dict['formats'] = formats
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['ext'], 'mp4')
ydl = YDL()
ydl.params['prefer_free_formats'] = False
formats = [
{'ext': 'flv', 'height': 720, 'url': TEST_URL},
{'ext': 'webm', 'height': 720, 'url': TEST_URL},
]
info_dict['formats'] = formats
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['ext'], 'flv')
def test_format_selection(self):
formats = [
{'format_id': '35', 'ext': 'mp4', 'preference': 1, 'url': TEST_URL},
{'format_id': 'example-with-dashes', 'ext': 'webm', 'preference': 1, 'url': TEST_URL},
{'format_id': '45', 'ext': 'webm', 'preference': 2, 'url': TEST_URL},
{'format_id': '47', 'ext': 'webm', 'preference': 3, 'url': TEST_URL},
{'format_id': '2', 'ext': 'flv', 'preference': 4, 'url': TEST_URL},
]
info_dict = _make_result(formats)
ydl = YDL({'format': '20/47'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '47')
ydl = YDL({'format': '20/71/worst'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '35')
ydl = YDL()
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '2')
ydl = YDL({'format': 'webm/mp4'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '47')
ydl = YDL({'format': '3gp/40/mp4'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '35')
ydl = YDL({'format': 'example-with-dashes'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'example-with-dashes')
def test_format_selection_audio(self):
formats = [
{'format_id': 'audio-low', 'ext': 'webm', 'preference': 1, 'vcodec': 'none', 'url': TEST_URL},
{'format_id': 'audio-mid', 'ext': 'webm', 'preference': 2, 'vcodec': 'none', 'url': TEST_URL},
{'format_id': 'audio-high', 'ext': 'flv', 'preference': 3, 'vcodec': 'none', 'url': TEST_URL},
{'format_id': 'vid', 'ext': 'mp4', 'preference': 4, 'url': TEST_URL},
]
info_dict = _make_result(formats)
ydl = YDL({'format': 'bestaudio'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'audio-high')
ydl = YDL({'format': 'worstaudio'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'audio-low')
formats = [
{'format_id': 'vid-low', 'ext': 'mp4', 'preference': 1, 'url': TEST_URL},
{'format_id': 'vid-high', 'ext': 'mp4', 'preference': 2, 'url': TEST_URL},
]
info_dict = _make_result(formats)
ydl = YDL({'format': 'bestaudio/worstaudio/best'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'vid-high')
def test_format_selection_audio_exts(self):
formats = [
{'format_id': 'mp3-64', 'ext': 'mp3', 'abr': 64, 'url': 'http://_', 'vcodec': 'none'},
{'format_id': 'ogg-64', 'ext': 'ogg', 'abr': 64, 'url': 'http://_', 'vcodec': 'none'},
{'format_id': 'aac-64', 'ext': 'aac', 'abr': 64, 'url': 'http://_', 'vcodec': 'none'},
{'format_id': 'mp3-32', 'ext': 'mp3', 'abr': 32, 'url': 'http://_', 'vcodec': 'none'},
{'format_id': 'aac-32', 'ext': 'aac', 'abr': 32, 'url': 'http://_', 'vcodec': 'none'},
]
info_dict = _make_result(formats)
ydl = YDL({'format': 'best'})
ie = YoutubeIE(ydl)
ie._sort_formats(info_dict['formats'])
ydl.process_ie_result(copy.deepcopy(info_dict))
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'aac-64')
ydl = YDL({'format': 'mp3'})
ie = YoutubeIE(ydl)
ie._sort_formats(info_dict['formats'])
ydl.process_ie_result(copy.deepcopy(info_dict))
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'mp3-64')
ydl = YDL({'prefer_free_formats': True})
ie = YoutubeIE(ydl)
ie._sort_formats(info_dict['formats'])
ydl.process_ie_result(copy.deepcopy(info_dict))
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'ogg-64')
def test_format_selection_video(self):
formats = [
{'format_id': 'dash-video-low', 'ext': 'mp4', 'preference': 1, 'acodec': 'none', 'url': TEST_URL},
{'format_id': 'dash-video-high', 'ext': 'mp4', 'preference': 2, 'acodec': 'none', 'url': TEST_URL},
{'format_id': 'vid', 'ext': 'mp4', 'preference': 3, 'url': TEST_URL},
]
info_dict = _make_result(formats)
ydl = YDL({'format': 'bestvideo'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'dash-video-high')
ydl = YDL({'format': 'worstvideo'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'dash-video-low')
ydl = YDL({'format': 'bestvideo[format_id^=dash][format_id$=low]'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'dash-video-low')
formats = [
{'format_id': 'vid-vcodec-dot', 'ext': 'mp4', 'preference': 1, 'vcodec': 'avc1.123456', 'acodec': 'none', 'url': TEST_URL},
]
info_dict = _make_result(formats)
ydl = YDL({'format': 'bestvideo[vcodec=avc1.123456]'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'vid-vcodec-dot')
def test_format_selection_string_ops(self):
formats = [
{'format_id': 'abc-cba', 'ext': 'mp4', 'url': TEST_URL},
{'format_id': 'zxc-cxz', 'ext': 'webm', 'url': TEST_URL},
]
info_dict = _make_result(formats)
# equals (=)
ydl = YDL({'format': '[format_id=abc-cba]'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'abc-cba')
# does not equal (!=)
ydl = YDL({'format': '[format_id!=abc-cba]'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'zxc-cxz')
ydl = YDL({'format': '[format_id!=abc-cba][format_id!=zxc-cxz]'})
self.assertRaises(ExtractorError, ydl.process_ie_result, info_dict.copy())
# starts with (^=)
ydl = YDL({'format': '[format_id^=abc]'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'abc-cba')
# does not start with (!^=)
ydl = YDL({'format': '[format_id!^=abc]'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'zxc-cxz')
ydl = YDL({'format': '[format_id!^=abc][format_id!^=zxc]'})
self.assertRaises(ExtractorError, ydl.process_ie_result, info_dict.copy())
# ends with ($=)
ydl = YDL({'format': '[format_id$=cba]'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'abc-cba')
# does not end with (!$=)
ydl = YDL({'format': '[format_id!$=cba]'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'zxc-cxz')
ydl = YDL({'format': '[format_id!$=cba][format_id!$=cxz]'})
self.assertRaises(ExtractorError, ydl.process_ie_result, info_dict.copy())
# contains (*=)
ydl = YDL({'format': '[format_id*=bc-cb]'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'abc-cba')
# does not contain (!*=)
ydl = YDL({'format': '[format_id!*=bc-cb]'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'zxc-cxz')
ydl = YDL({'format': '[format_id!*=abc][format_id!*=zxc]'})
self.assertRaises(ExtractorError, ydl.process_ie_result, info_dict.copy())
ydl = YDL({'format': '[format_id!*=-]'})
self.assertRaises(ExtractorError, ydl.process_ie_result, info_dict.copy())
def test_youtube_format_selection(self):
order = [
'38', '37', '46', '22', '45', '35', '44', '18', '34', '43', '6', '5', '17', '36', '13',
# Apple HTTP Live Streaming
'96', '95', '94', '93', '92', '132', '151',
# 3D
'85', '84', '102', '83', '101', '82', '100',
# Dash video
'137', '248', '136', '247', '135', '246',
'245', '244', '134', '243', '133', '242', '160',
# Dash audio
'141', '172', '140', '171', '139',
]
def format_info(f_id):
info = YoutubeIE._formats[f_id].copy()
# XXX: In real cases InfoExtractor._parse_mpd_formats() fills up 'acodec'
# and 'vcodec', while in tests such information is incomplete since
# commit a6c2c24479e5f4827ceb06f64d855329c0a6f593
# test_YoutubeDL.test_youtube_format_selection is broken without
# this fix
if 'acodec' in info and 'vcodec' not in info:
info['vcodec'] = 'none'
elif 'vcodec' in info and 'acodec' not in info:
info['acodec'] = 'none'
info['format_id'] = f_id
info['url'] = 'url:' + f_id
return info
formats_order = [format_info(f_id) for f_id in order]
info_dict = _make_result(list(formats_order), extractor='youtube')
ydl = YDL({'format': 'bestvideo+bestaudio'})
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '137+141')
self.assertEqual(downloaded['ext'], 'mp4')
info_dict = _make_result(list(formats_order), extractor='youtube')
ydl = YDL({'format': 'bestvideo[height>=999999]+bestaudio/best'})
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '38')
info_dict = _make_result(list(formats_order), extractor='youtube')
ydl = YDL({'format': 'bestvideo/best,bestaudio'})
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
self.assertEqual(downloaded_ids, ['137', '141'])
info_dict = _make_result(list(formats_order), extractor='youtube')
ydl = YDL({'format': '(bestvideo[ext=mp4],bestvideo[ext=webm])+bestaudio'})
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
self.assertEqual(downloaded_ids, ['137+141', '248+141'])
info_dict = _make_result(list(formats_order), extractor='youtube')
ydl = YDL({'format': '(bestvideo[ext=mp4],bestvideo[ext=webm])[height<=720]+bestaudio'})
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
self.assertEqual(downloaded_ids, ['136+141', '247+141'])
info_dict = _make_result(list(formats_order), extractor='youtube')
ydl = YDL({'format': '(bestvideo[ext=none]/bestvideo[ext=webm])+bestaudio'})
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
self.assertEqual(downloaded_ids, ['248+141'])
for f1, f2 in zip(formats_order, formats_order[1:]):
info_dict = _make_result([f1, f2], extractor='youtube')
ydl = YDL({'format': 'best/bestvideo'})
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], f1['format_id'])
info_dict = _make_result([f2, f1], extractor='youtube')
ydl = YDL({'format': 'best/bestvideo'})
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], f1['format_id'])
def test_audio_only_extractor_format_selection(self):
# For extractors with incomplete formats (all formats are audio-only or
        # video-only) best and worst should fall back to the corresponding best/worst
# video-only or audio-only formats (as per
# https://github.com/ytdl-org/youtube-dl/pull/5556)
formats = [
{'format_id': 'low', 'ext': 'mp3', 'preference': 1, 'vcodec': 'none', 'url': TEST_URL},
{'format_id': 'high', 'ext': 'mp3', 'preference': 2, 'vcodec': 'none', 'url': TEST_URL},
]
info_dict = _make_result(formats)
ydl = YDL({'format': 'best'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'high')
ydl = YDL({'format': 'worst'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'low')
def test_format_not_available(self):
formats = [
{'format_id': 'regular', 'ext': 'mp4', 'height': 360, 'url': TEST_URL},
{'format_id': 'video', 'ext': 'mp4', 'height': 720, 'acodec': 'none', 'url': TEST_URL},
]
info_dict = _make_result(formats)
# This must fail since complete video-audio format does not match filter
# and extractor does not provide incomplete only formats (i.e. only
# video-only or audio-only).
ydl = YDL({'format': 'best[height>360]'})
self.assertRaises(ExtractorError, ydl.process_ie_result, info_dict.copy())
def test_format_selection_issue_10083(self):
# See https://github.com/ytdl-org/youtube-dl/issues/10083
formats = [
{'format_id': 'regular', 'height': 360, 'url': TEST_URL},
{'format_id': 'video', 'height': 720, 'acodec': 'none', 'url': TEST_URL},
{'format_id': 'audio', 'vcodec': 'none', 'url': TEST_URL},
]
info_dict = _make_result(formats)
ydl = YDL({'format': 'best[height>360]/bestvideo[height>360]+bestaudio'})
ydl.process_ie_result(info_dict.copy())
self.assertEqual(ydl.downloaded_info_dicts[0]['format_id'], 'video+audio')
def test_invalid_format_specs(self):
def assert_syntax_error(format_spec):
ydl = YDL({'format': format_spec})
info_dict = _make_result([{'format_id': 'foo', 'url': TEST_URL}])
self.assertRaises(SyntaxError, ydl.process_ie_result, info_dict)
assert_syntax_error('bestvideo,,best')
assert_syntax_error('+bestaudio')
assert_syntax_error('bestvideo+')
assert_syntax_error('/')
assert_syntax_error('bestvideo+bestvideo+bestaudio')
def test_format_filtering(self):
formats = [
{'format_id': 'A', 'filesize': 500, 'width': 1000},
{'format_id': 'B', 'filesize': 1000, 'width': 500},
{'format_id': 'C', 'filesize': 1000, 'width': 400},
{'format_id': 'D', 'filesize': 2000, 'width': 600},
{'format_id': 'E', 'filesize': 3000},
{'format_id': 'F'},
{'format_id': 'G', 'filesize': 1000000},
]
for f in formats:
f['url'] = 'http://_/'
f['ext'] = 'unknown'
info_dict = _make_result(formats)
ydl = YDL({'format': 'best[filesize<3000]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'D')
ydl = YDL({'format': 'best[filesize<=3000]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'E')
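        # The '?' suffix lets the comparison pass for formats that do not
        # report the field at all (format F has no filesize)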
ydl = YDL({'format': 'best[filesize <= ? 3000]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'F')
ydl = YDL({'format': 'best [filesize = 1000] [width>450]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'B')
ydl = YDL({'format': 'best [filesize = 1000] [width!=450]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'C')
ydl = YDL({'format': '[filesize>?1]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'G')
ydl = YDL({'format': '[filesize<1M]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'E')
ydl = YDL({'format': '[filesize<1MiB]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'G')
ydl = YDL({'format': 'all[width>=400][width<=600]'})
ydl.process_ie_result(info_dict)
downloaded_ids = [info['format_id'] for info in ydl.downloaded_info_dicts]
self.assertEqual(downloaded_ids, ['B', 'C', 'D'])
ydl = YDL({'format': 'best[height<40]'})
try:
ydl.process_ie_result(info_dict)
except ExtractorError:
pass
self.assertEqual(ydl.downloaded_info_dicts, [])
def test_default_format_spec(self):
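        # bestvideo+bestaudio is only the default when merging is possible,
        # i.e. not downloading a live stream and not writing to stdout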
ydl = YDL({'simulate': True})
self.assertEqual(ydl._default_format_spec({}), 'bestvideo+bestaudio/best')
ydl = YDL({})
self.assertEqual(ydl._default_format_spec({'is_live': True}), 'best/bestvideo+bestaudio')
ydl = YDL({'simulate': True})
self.assertEqual(ydl._default_format_spec({'is_live': True}), 'bestvideo+bestaudio/best')
ydl = YDL({'outtmpl': '-'})
self.assertEqual(ydl._default_format_spec({}), 'best/bestvideo+bestaudio')
ydl = YDL({})
self.assertEqual(ydl._default_format_spec({}, download=False), 'bestvideo+bestaudio/best')
self.assertEqual(ydl._default_format_spec({'is_live': True}), 'best/bestvideo+bestaudio')
class TestYoutubeDL(unittest.TestCase):
def test_subtitles(self):
def s_formats(lang, autocaption=False):
return [{
'ext': ext,
'url': 'http://localhost/video.%s.%s' % (lang, ext),
'_auto': autocaption,
} for ext in ['vtt', 'srt', 'ass']]
subtitles = dict((l, s_formats(l)) for l in ['en', 'fr', 'es'])
auto_captions = dict((l, s_formats(l, True)) for l in ['it', 'pt', 'es'])
info_dict = {
'id': 'test',
'title': 'Test',
'url': 'http://localhost/video.mp4',
'subtitles': subtitles,
'automatic_captions': auto_captions,
'extractor': 'TEST',
}
def get_info(params={}):
params.setdefault('simulate', True)
ydl = YDL(params)
ydl.report_warning = lambda *args, **kargs: None
return ydl.process_video_result(info_dict, download=False)
result = get_info()
self.assertFalse(result.get('requested_subtitles'))
self.assertEqual(result['subtitles'], subtitles)
self.assertEqual(result['automatic_captions'], auto_captions)
result = get_info({'writesubtitles': True})
subs = result['requested_subtitles']
self.assertTrue(subs)
self.assertEqual(set(subs.keys()), set(['en']))
self.assertTrue(subs['en'].get('data') is None)
self.assertEqual(subs['en']['ext'], 'ass')
result = get_info({'writesubtitles': True, 'subtitlesformat': 'foo/srt'})
subs = result['requested_subtitles']
self.assertEqual(subs['en']['ext'], 'srt')
result = get_info({'writesubtitles': True, 'subtitleslangs': ['es', 'fr', 'it']})
subs = result['requested_subtitles']
self.assertTrue(subs)
self.assertEqual(set(subs.keys()), set(['es', 'fr']))
result = get_info({'writesubtitles': True, 'writeautomaticsub': True, 'subtitleslangs': ['es', 'pt']})
subs = result['requested_subtitles']
self.assertTrue(subs)
self.assertEqual(set(subs.keys()), set(['es', 'pt']))
self.assertFalse(subs['es']['_auto'])
self.assertTrue(subs['pt']['_auto'])
result = get_info({'writeautomaticsub': True, 'subtitleslangs': ['es', 'pt']})
subs = result['requested_subtitles']
self.assertTrue(subs)
self.assertEqual(set(subs.keys()), set(['es', 'pt']))
self.assertTrue(subs['es']['_auto'])
self.assertTrue(subs['pt']['_auto'])
def test_add_extra_info(self):
test_dict = {
'extractor': 'Foo',
}
extra_info = {
'extractor': 'Bar',
'playlist': 'funny videos',
}
YDL.add_extra_info(test_dict, extra_info)
self.assertEqual(test_dict['extractor'], 'Foo')
self.assertEqual(test_dict['playlist'], 'funny videos')
def test_prepare_filename(self):
info = {
'id': '1234',
'ext': 'mp4',
'width': None,
'height': 1080,
'title1': '$PATH',
'title2': '%PATH%',
}
def fname(templ, na_placeholder='NA'):
params = {'outtmpl': templ}
if na_placeholder != 'NA':
params['outtmpl_na_placeholder'] = na_placeholder
ydl = YoutubeDL(params)
return ydl.prepare_filename(info)
self.assertEqual(fname('%(id)s.%(ext)s'), '1234.mp4')
self.assertEqual(fname('%(id)s-%(width)s.%(ext)s'), '1234-NA.mp4')
NA_TEST_OUTTMPL = '%(uploader_date)s-%(width)d-%(id)s.%(ext)s'
# Replace missing fields with 'NA' by default
self.assertEqual(fname(NA_TEST_OUTTMPL), 'NA-NA-1234.mp4')
# Or by provided placeholder
self.assertEqual(fname(NA_TEST_OUTTMPL, na_placeholder='none'), 'none-none-1234.mp4')
self.assertEqual(fname(NA_TEST_OUTTMPL, na_placeholder=''), '--1234.mp4')
self.assertEqual(fname('%(height)d.%(ext)s'), '1080.mp4')
        self.assertEqual(fname('%(height)6d.%(ext)s'), '  1080.mp4')
        self.assertEqual(fname('%(height)-6d.%(ext)s'), '1080  .mp4')
        self.assertEqual(fname('%(height)06d.%(ext)s'), '001080.mp4')
        self.assertEqual(fname('%(height) 06d.%(ext)s'), ' 01080.mp4')
        self.assertEqual(fname('%(height)   06d.%(ext)s'), ' 01080.mp4')
        self.assertEqual(fname('%(height)0 6d.%(ext)s'), ' 01080.mp4')
        self.assertEqual(fname('%(height)0   6d.%(ext)s'), ' 01080.mp4')
        self.assertEqual(fname('%(height)   0   6d.%(ext)s'), ' 01080.mp4')
self.assertEqual(fname('%%'), '%')
self.assertEqual(fname('%%%%'), '%%')
self.assertEqual(fname('%%(height)06d.%(ext)s'), '%(height)06d.mp4')
self.assertEqual(fname('%(width)06d.%(ext)s'), 'NA.mp4')
self.assertEqual(fname('%(width)06d.%%(ext)s'), 'NA.%(ext)s')
self.assertEqual(fname('%%(width)06d.%(ext)s'), '%(width)06d.mp4')
self.assertEqual(fname('Hello %(title1)s'), 'Hello $PATH')
self.assertEqual(fname('Hello %(title2)s'), 'Hello %PATH%')
def test_format_note(self):
ydl = YoutubeDL()
self.assertEqual(ydl._format_note({}), '')
assertRegexpMatches(self, ydl._format_note({
'vbr': 10,
}), r'^\s*10k$')
assertRegexpMatches(self, ydl._format_note({
'fps': 30,
}), r'^30fps$')
def test_postprocessors(self):
filename = 'post-processor-testfile.mp4'
audiofile = filename + '.mp3'
class SimplePP(PostProcessor):
def run(self, info):
with open(audiofile, 'wt') as f:
f.write('EXAMPLE')
return [info['filepath']], info
def run_pp(params, PP):
with open(filename, 'wt') as f:
f.write('EXAMPLE')
ydl = YoutubeDL(params)
ydl.add_post_processor(PP())
ydl.post_process(filename, {'filepath': filename})
run_pp({'keepvideo': True}, SimplePP)
self.assertTrue(os.path.exists(filename), '%s doesn\'t exist' % filename)
self.assertTrue(os.path.exists(audiofile), '%s doesn\'t exist' % audiofile)
os.unlink(filename)
os.unlink(audiofile)
run_pp({'keepvideo': False}, SimplePP)
self.assertFalse(os.path.exists(filename), '%s exists' % filename)
self.assertTrue(os.path.exists(audiofile), '%s doesn\'t exist' % audiofile)
os.unlink(audiofile)
class ModifierPP(PostProcessor):
def run(self, info):
with open(info['filepath'], 'wt') as f:
f.write('MODIFIED')
return [], info
run_pp({'keepvideo': False}, ModifierPP)
self.assertTrue(os.path.exists(filename), '%s doesn\'t exist' % filename)
os.unlink(filename)
def test_match_filter(self):
class FilterYDL(YDL):
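            # Simulate-only YDL that records every entry accepted by the match filter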
def __init__(self, *args, **kwargs):
super(FilterYDL, self).__init__(*args, **kwargs)
self.params['simulate'] = True
def process_info(self, info_dict):
super(YDL, self).process_info(info_dict)
def _match_entry(self, info_dict, incomplete):
res = super(FilterYDL, self)._match_entry(info_dict, incomplete)
if res is None:
self.downloaded_info_dicts.append(info_dict)
return res
first = {
'id': '1',
'url': TEST_URL,
'title': 'one',
'extractor': 'TEST',
'duration': 30,
'filesize': 10 * 1024,
'playlist_id': '42',
'uploader': "變態妍字幕版 太妍 тест",
'creator': "тест ' 123 ' тест--",
}
second = {
'id': '2',
'url': TEST_URL,
'title': 'two',
'extractor': 'TEST',
'duration': 10,
'description': 'foo',
'filesize': 5 * 1024,
'playlist_id': '43',
'uploader': "тест 123",
}
videos = [first, second]
def get_videos(filter_=None):
ydl = FilterYDL({'match_filter': filter_})
for v in videos:
ydl.process_ie_result(v, download=True)
return [v['id'] for v in ydl.downloaded_info_dicts]
res = get_videos()
self.assertEqual(res, ['1', '2'])
def f(v):
if v['id'] == '1':
return None
else:
return 'Video id is not 1'
res = get_videos(f)
self.assertEqual(res, ['1'])
f = match_filter_func('duration < 30')
res = get_videos(f)
self.assertEqual(res, ['2'])
f = match_filter_func('description = foo')
res = get_videos(f)
self.assertEqual(res, ['2'])
f = match_filter_func('description =? foo')
res = get_videos(f)
self.assertEqual(res, ['1', '2'])
f = match_filter_func('filesize > 5KiB')
res = get_videos(f)
self.assertEqual(res, ['1'])
f = match_filter_func('playlist_id = 42')
res = get_videos(f)
self.assertEqual(res, ['1'])
f = match_filter_func('uploader = "變態妍字幕版 太妍 тест"')
res = get_videos(f)
self.assertEqual(res, ['1'])
f = match_filter_func('uploader != "變態妍字幕版 太妍 тест"')
res = get_videos(f)
self.assertEqual(res, ['2'])
f = match_filter_func('creator = "тест \' 123 \' тест--"')
res = get_videos(f)
self.assertEqual(res, ['1'])
f = match_filter_func("creator = 'тест \\' 123 \\' тест--'")
res = get_videos(f)
self.assertEqual(res, ['1'])
f = match_filter_func(r"creator = 'тест \' 123 \' тест--' & duration > 30")
res = get_videos(f)
self.assertEqual(res, [])
def test_playlist_items_selection(self):
entries = [{
'id': compat_str(i),
'title': compat_str(i),
'url': TEST_URL,
} for i in range(1, 5)]
playlist = {
'_type': 'playlist',
'id': 'test',
'entries': entries,
'extractor': 'test:playlist',
'extractor_key': 'test:playlist',
'webpage_url': 'http://example.com',
}
def get_downloaded_info_dicts(params):
ydl = YDL(params)
# make a deep copy because the dictionary and nested entries
# can be modified
ydl.process_ie_result(copy.deepcopy(playlist))
return ydl.downloaded_info_dicts
def get_ids(params):
return [int(v['id']) for v in get_downloaded_info_dicts(params)]
result = get_ids({})
self.assertEqual(result, [1, 2, 3, 4])
result = get_ids({'playlistend': 10})
self.assertEqual(result, [1, 2, 3, 4])
result = get_ids({'playlistend': 2})
self.assertEqual(result, [1, 2])
result = get_ids({'playliststart': 10})
self.assertEqual(result, [])
result = get_ids({'playliststart': 2})
self.assertEqual(result, [2, 3, 4])
result = get_ids({'playlist_items': '2-4'})
self.assertEqual(result, [2, 3, 4])
result = get_ids({'playlist_items': '2,4'})
self.assertEqual(result, [2, 4])
result = get_ids({'playlist_items': '10'})
self.assertEqual(result, [])
result = get_ids({'playlist_items': '3-10'})
self.assertEqual(result, [3, 4])
result = get_ids({'playlist_items': '2-4,3-4,3'})
self.assertEqual(result, [2, 3, 4])
# Tests for https://github.com/ytdl-org/youtube-dl/issues/10591
# @{
result = get_downloaded_info_dicts({'playlist_items': '2-4,3-4,3'})
self.assertEqual(result[0]['playlist_index'], 2)
self.assertEqual(result[1]['playlist_index'], 3)
result = get_downloaded_info_dicts({'playlist_items': '2-4,3-4,3'})
self.assertEqual(result[0]['playlist_index'], 2)
self.assertEqual(result[1]['playlist_index'], 3)
self.assertEqual(result[2]['playlist_index'], 4)
result = get_downloaded_info_dicts({'playlist_items': '4,2'})
self.assertEqual(result[0]['playlist_index'], 4)
self.assertEqual(result[1]['playlist_index'], 2)
# @}
def test_urlopen_no_file_protocol(self):
# see https://github.com/ytdl-org/youtube-dl/issues/8227
ydl = YDL()
self.assertRaises(compat_urllib_error.URLError, ydl.urlopen, 'file:///etc/passwd')
def test_do_not_override_ie_key_in_url_transparent(self):
ydl = YDL()
class Foo1IE(InfoExtractor):
_VALID_URL = r'foo1:'
def _real_extract(self, url):
return {
'_type': 'url_transparent',
'url': 'foo2:',
'ie_key': 'Foo2',
'title': 'foo1 title',
'id': 'foo1_id',
}
class Foo2IE(InfoExtractor):
_VALID_URL = r'foo2:'
def _real_extract(self, url):
return {
'_type': 'url',
'url': 'foo3:',
'ie_key': 'Foo3',
}
class Foo3IE(InfoExtractor):
_VALID_URL = r'foo3:'
def _real_extract(self, url):
return _make_result([{'url': TEST_URL}], title='foo3 title')
ydl.add_info_extractor(Foo1IE(ydl))
ydl.add_info_extractor(Foo2IE(ydl))
ydl.add_info_extractor(Foo3IE(ydl))
ydl.extract_info('foo1:')
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['url'], TEST_URL)
self.assertEqual(downloaded['title'], 'foo1 title')
self.assertEqual(downloaded['id'], 'testid')
self.assertEqual(downloaded['extractor'], 'testex')
self.assertEqual(downloaded['extractor_key'], 'TestEx')
# Test case for https://github.com/ytdl-org/youtube-dl/issues/27064
def test_ignoreerrors_for_playlist_with_url_transparent_iterable_entries(self):
class _YDL(YDL):
def __init__(self, *args, **kwargs):
super(_YDL, self).__init__(*args, **kwargs)
def trouble(self, s, tb=None):
pass
ydl = _YDL({
'format': 'extra',
'ignoreerrors': True,
})
class VideoIE(InfoExtractor):
_VALID_URL = r'video:(?P<id>\d+)'
def _real_extract(self, url):
video_id = self._match_id(url)
formats = [{
'format_id': 'default',
'url': 'url:',
}]
if video_id == '0':
raise ExtractorError('foo')
if video_id == '2':
formats.append({
'format_id': 'extra',
'url': TEST_URL,
})
return {
'id': video_id,
'title': 'Video %s' % video_id,
'formats': formats,
}
class PlaylistIE(InfoExtractor):
_VALID_URL = r'playlist:'
def _entries(self):
for n in range(3):
video_id = compat_str(n)
yield {
'_type': 'url_transparent',
'ie_key': VideoIE.ie_key(),
'id': video_id,
'url': 'video:%s' % video_id,
'title': 'Video Transparent %s' % video_id,
}
def _real_extract(self, url):
return self.playlist_result(self._entries())
ydl.add_info_extractor(VideoIE(ydl))
ydl.add_info_extractor(PlaylistIE(ydl))
info = ydl.extract_info('playlist:')
entries = info['entries']
self.assertEqual(len(entries), 3)
self.assertTrue(entries[0] is None)
self.assertTrue(entries[1] is None)
self.assertEqual(len(ydl.downloaded_info_dicts), 1)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(entries[2], downloaded)
self.assertEqual(downloaded['url'], TEST_URL)
self.assertEqual(downloaded['title'], 'Video Transparent 2')
self.assertEqual(downloaded['id'], '2')
self.assertEqual(downloaded['extractor'], 'Video')
self.assertEqual(downloaded['extractor_key'], 'Video')
if __name__ == '__main__':
unittest.main()
|
unlicense
|
b7d55abe3d0d328a464af32c0ad22d1e
| 38.883234
| 135
| 0.549408
| 3.538741
| false
| true
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/vk.py
|
9
|
25866
|
# coding: utf-8
from __future__ import unicode_literals
import collections
import functools
import re
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
clean_html,
ExtractorError,
get_element_by_class,
int_or_none,
OnDemandPagedList,
orderedSet,
str_or_none,
str_to_int,
unescapeHTML,
unified_timestamp,
url_or_none,
urlencode_postdata,
)
from .dailymotion import DailymotionIE
from .odnoklassniki import OdnoklassnikiIE
from .pladform import PladformIE
from .vimeo import VimeoIE
from .youtube import YoutubeIE
class VKBaseIE(InfoExtractor):
_NETRC_MACHINE = 'vk'
def _login(self):
username, password = self._get_login_info()
if username is None:
return
login_page, url_handle = self._download_webpage_handle(
'https://vk.com', None, 'Downloading login page')
login_form = self._hidden_inputs(login_page)
login_form.update({
'email': username.encode('cp1251'),
'pass': password.encode('cp1251'),
})
        # vk serves two identical remixlhk cookies in the Set-Cookie header and
        # expects the first one to actually be set
self._apply_first_set_cookie_header(url_handle, 'remixlhk')
login_page = self._download_webpage(
'https://login.vk.com/?act=login', None,
note='Logging in',
data=urlencode_postdata(login_form))
if re.search(r'onLoginFailed', login_page):
raise ExtractorError(
'Unable to login, incorrect username and/or password', expected=True)
def _real_initialize(self):
self._login()
def _download_payload(self, path, video_id, data, fatal=True):
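        # vk's al_* AJAX endpoints wrap results in a payload whose first element
        # is a status code: '3' means login is required, '8' carries an error message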
data['al'] = 1
code, payload = self._download_json(
'https://vk.com/%s.php' % path, video_id,
data=urlencode_postdata(data), fatal=fatal,
headers={'X-Requested-With': 'XMLHttpRequest'})['payload']
if code == '3':
self.raise_login_required()
elif code == '8':
raise ExtractorError(clean_html(payload[0][1:-1]), expected=True)
return payload
class VKIE(VKBaseIE):
IE_NAME = 'vk'
IE_DESC = 'VK'
_VALID_URL = r'''(?x)
https?://
(?:
(?:
(?:(?:m|new)\.)?vk\.com/video_|
(?:www\.)?daxab.com/
)
ext\.php\?(?P<embed_query>.*?\boid=(?P<oid>-?\d+).*?\bid=(?P<id>\d+).*)|
(?:
(?:(?:m|new)\.)?vk\.com/(?:.+?\?.*?z=)?video|
(?:www\.)?daxab.com/embed/
)
(?P<videoid>-?\d+_\d+)(?:.*\blist=(?P<list_id>[\da-f]+))?
)
'''
_TESTS = [
{
'url': 'http://vk.com/videos-77521?z=video-77521_162222515%2Fclub77521',
'md5': '7babad3b85ea2e91948005b1b8b0cb84',
'info_dict': {
'id': '-77521_162222515',
'ext': 'mp4',
'title': 'ProtivoGunz - Хуёвая песня',
'uploader': 're:(?:Noize MC|Alexander Ilyashenko).*',
'uploader_id': '-77521',
'duration': 195,
'timestamp': 1329049880,
'upload_date': '20120212',
},
},
{
'url': 'http://vk.com/video205387401_165548505',
'info_dict': {
'id': '205387401_165548505',
'ext': 'mp4',
'title': 'No name',
'uploader': 'Tom Cruise',
'uploader_id': '205387401',
'duration': 9,
'timestamp': 1374364108,
'upload_date': '20130720',
}
},
{
'note': 'Embedded video',
'url': 'https://vk.com/video_ext.php?oid=-77521&id=162222515&hash=87b046504ccd8bfa',
'md5': '7babad3b85ea2e91948005b1b8b0cb84',
'info_dict': {
'id': '-77521_162222515',
'ext': 'mp4',
'uploader': 're:(?:Noize MC|Alexander Ilyashenko).*',
'title': 'ProtivoGunz - Хуёвая песня',
'duration': 195,
'upload_date': '20120212',
'timestamp': 1329049880,
'uploader_id': '-77521',
},
},
{
# VIDEO NOW REMOVED
# please update if you find a video whose URL follows the same pattern
'url': 'http://vk.com/video-8871596_164049491',
'md5': 'a590bcaf3d543576c9bd162812387666',
'note': 'Only available for registered users',
'info_dict': {
'id': '-8871596_164049491',
'ext': 'mp4',
'uploader': 'Триллеры',
'title': '► Бойцовский клуб / Fight Club 1999 [HD 720]',
'duration': 8352,
'upload_date': '20121218',
'view_count': int,
},
'skip': 'Removed',
},
{
'url': 'http://vk.com/hd_kino_mania?z=video-43215063_168067957%2F15c66b9b533119788d',
'info_dict': {
'id': '-43215063_168067957',
'ext': 'mp4',
'uploader': 'Bro Mazter',
'title': ' ',
'duration': 7291,
'upload_date': '20140328',
'uploader_id': '223413403',
'timestamp': 1396018030,
},
'skip': 'Requires vk account credentials',
},
{
'url': 'http://m.vk.com/video-43215063_169084319?list=125c627d1aa1cebb83&from=wall-43215063_2566540',
'md5': '0c45586baa71b7cb1d0784ee3f4e00a6',
'note': 'ivi.ru embed',
'info_dict': {
'id': '-43215063_169084319',
'ext': 'mp4',
'title': 'Книга Илая',
'duration': 6771,
'upload_date': '20140626',
'view_count': int,
},
'skip': 'Removed',
},
{
# video (removed?) only available with list id
'url': 'https://vk.com/video30481095_171201961?list=8764ae2d21f14088d4',
'md5': '091287af5402239a1051c37ec7b92913',
'info_dict': {
'id': '30481095_171201961',
'ext': 'mp4',
'title': 'ТюменцевВВ_09.07.2015',
'uploader': 'Anton Ivanov',
'duration': 109,
'upload_date': '20150709',
'view_count': int,
},
'skip': 'Removed',
},
{
# youtube embed
'url': 'https://vk.com/video276849682_170681728',
'info_dict': {
'id': 'V3K4mi0SYkc',
'ext': 'mp4',
'title': "DSWD Awards 'Children's Joy Foundation, Inc.' Certificate of Registration and License to Operate",
'description': 'md5:bf9c26cfa4acdfb146362682edd3827a',
'duration': 178,
'upload_date': '20130116',
'uploader': "Children's Joy Foundation Inc.",
'uploader_id': 'thecjf',
'view_count': int,
},
},
{
# dailymotion embed
'url': 'https://vk.com/video-37468416_456239855',
'info_dict': {
'id': 'k3lz2cmXyRuJQSjGHUv',
'ext': 'mp4',
'title': 'md5:d52606645c20b0ddbb21655adaa4f56f',
'description': 'md5:424b8e88cc873217f520e582ba28bb36',
'uploader': 'AniLibria.Tv',
'upload_date': '20160914',
'uploader_id': 'x1p5vl5',
'timestamp': 1473877246,
},
'params': {
'skip_download': True,
},
},
{
# video key is extra_data not url\d+
'url': 'http://vk.com/video-110305615_171782105',
'md5': 'e13fcda136f99764872e739d13fac1d1',
'info_dict': {
'id': '-110305615_171782105',
'ext': 'mp4',
'title': 'S-Dance, репетиции к The way show',
'uploader': 'THE WAY SHOW | 17 апреля',
'uploader_id': '-110305615',
'timestamp': 1454859345,
'upload_date': '20160207',
},
'params': {
'skip_download': True,
},
},
{
# finished live stream, postlive_mp4
'url': 'https://vk.com/videos-387766?z=video-387766_456242764%2Fpl_-387766_-2',
'info_dict': {
'id': '-387766_456242764',
'ext': 'mp4',
'title': 'ИгроМир 2016 День 1 — Игромания Утром',
'uploader': 'Игромания',
'duration': 5239,
# TODO: use act=show to extract view_count
# 'view_count': int,
'upload_date': '20160929',
'uploader_id': '-387766',
'timestamp': 1475137527,
},
'params': {
'skip_download': True,
},
},
{
# live stream, hls and rtmp links, most likely already finished live
# stream by the time you are reading this comment
'url': 'https://vk.com/video-140332_456239111',
'only_matching': True,
},
{
# removed video, just testing that we match the pattern
'url': 'http://vk.com/feed?z=video-43215063_166094326%2Fbb50cacd3177146d7a',
'only_matching': True,
},
{
# age restricted video, requires vk account credentials
'url': 'https://vk.com/video205387401_164765225',
'only_matching': True,
},
{
# pladform embed
'url': 'https://vk.com/video-76116461_171554880',
'only_matching': True,
},
{
'url': 'http://new.vk.com/video205387401_165548505',
'only_matching': True,
},
{
# This video is no longer available, because its author has been blocked.
'url': 'https://vk.com/video-10639516_456240611',
'only_matching': True,
},
{
# The video is not available in your region.
'url': 'https://vk.com/video-51812607_171445436',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('videoid')
mv_data = {}
if video_id:
data = {
'act': 'show_inline',
'video': video_id,
}
# Some videos (removed?) can only be downloaded with list id specified
list_id = mobj.group('list_id')
if list_id:
data['list'] = list_id
payload = self._download_payload('al_video', video_id, data)
info_page = payload[1]
opts = payload[-1]
mv_data = opts.get('mvData') or {}
player = opts.get('player') or {}
else:
video_id = '%s_%s' % (mobj.group('oid'), mobj.group('id'))
info_page = self._download_webpage(
'http://vk.com/video_ext.php?' + mobj.group('embed_query'), video_id)
error_message = self._html_search_regex(
[r'(?s)<!><div[^>]+class="video_layer_message"[^>]*>(.+?)</div>',
r'(?s)<div[^>]+id="video_ext_msg"[^>]*>(.+?)</div>'],
info_page, 'error message', default=None)
if error_message:
raise ExtractorError(error_message, expected=True)
if re.search(r'<!>/login\.php\?.*\bact=security_check', info_page):
raise ExtractorError(
'You are trying to log in from an unusual location. You should confirm ownership at vk.com to log in with this IP.',
expected=True)
ERROR_COPYRIGHT = 'Video %s has been removed from public access due to rightholder complaint.'
ERRORS = {
r'>Видеозапись .*? была изъята из публичного доступа в связи с обращением правообладателя.<':
ERROR_COPYRIGHT,
r'>The video .*? was removed from public access by request of the copyright holder.<':
ERROR_COPYRIGHT,
r'<!>Please log in or <':
'Video %s is only available for registered users, '
'use --username and --password options to provide account credentials.',
r'<!>Unknown error':
'Video %s does not exist.',
r'<!>Видео временно недоступно':
'Video %s is temporarily unavailable.',
r'<!>Access denied':
'Access denied to video %s.',
r'<!>Видеозапись недоступна, так как её автор был заблокирован.':
'Video %s is no longer available, because its author has been blocked.',
r'<!>This video is no longer available, because its author has been blocked.':
'Video %s is no longer available, because its author has been blocked.',
r'<!>This video is no longer available, because it has been deleted.':
'Video %s is no longer available, because it has been deleted.',
r'<!>The video .+? is not available in your region.':
'Video %s is not available in your region.',
}
for error_re, error_msg in ERRORS.items():
if re.search(error_re, info_page):
raise ExtractorError(error_msg % video_id, expected=True)
player = self._parse_json(self._search_regex(
r'var\s+playerParams\s*=\s*({.+?})\s*;\s*\n',
info_page, 'player params'), video_id)
youtube_url = YoutubeIE._extract_url(info_page)
if youtube_url:
return self.url_result(youtube_url, YoutubeIE.ie_key())
vimeo_url = VimeoIE._extract_url(url, info_page)
if vimeo_url is not None:
return self.url_result(vimeo_url, VimeoIE.ie_key())
pladform_url = PladformIE._extract_url(info_page)
if pladform_url:
return self.url_result(pladform_url, PladformIE.ie_key())
m_rutube = re.search(
r'\ssrc="((?:https?:)?//rutube\.ru\\?/(?:video|play)\\?/embed(?:.*?))\\?"', info_page)
if m_rutube is not None:
rutube_url = self._proto_relative_url(
m_rutube.group(1).replace('\\', ''))
return self.url_result(rutube_url)
dailymotion_urls = DailymotionIE._extract_urls(info_page)
if dailymotion_urls:
return self.url_result(dailymotion_urls[0], DailymotionIE.ie_key())
odnoklassniki_url = OdnoklassnikiIE._extract_url(info_page)
if odnoklassniki_url:
return self.url_result(odnoklassniki_url, OdnoklassnikiIE.ie_key())
m_opts = re.search(r'(?s)var\s+opts\s*=\s*({.+?});', info_page)
if m_opts:
m_opts_url = re.search(r"url\s*:\s*'((?!/\b)[^']+)", m_opts.group(1))
if m_opts_url:
opts_url = m_opts_url.group(1)
if opts_url.startswith('//'):
opts_url = 'http:' + opts_url
return self.url_result(opts_url)
data = player['params'][0]
title = unescapeHTML(data['md_title'])
# 2 = live
# 3 = post live (finished live)
is_live = data.get('live') == 2
if is_live:
title = self._live_title(title)
timestamp = unified_timestamp(self._html_search_regex(
r'class=["\']mv_info_date[^>]+>([^<]+)(?:<|from)', info_page,
'upload date', default=None)) or int_or_none(data.get('date'))
view_count = str_to_int(self._search_regex(
r'class=["\']mv_views_count[^>]+>\s*([\d,.]+)',
info_page, 'view count', default=None))
formats = []
for format_id, format_url in data.items():
format_url = url_or_none(format_url)
if not format_url or not format_url.startswith(('http', '//', 'rtmp')):
continue
if (format_id.startswith(('url', 'cache'))
or format_id in ('extra_data', 'live_mp4', 'postlive_mp4')):
height = int_or_none(self._search_regex(
r'^(?:url|cache)(\d+)', format_id, 'height', default=None))
formats.append({
'format_id': format_id,
'url': format_url,
'height': height,
})
elif format_id == 'hls':
formats.extend(self._extract_m3u8_formats(
format_url, video_id, 'mp4', 'm3u8_native',
m3u8_id=format_id, fatal=False, live=is_live))
elif format_id == 'rtmp':
formats.append({
'format_id': format_id,
'url': format_url,
'ext': 'flv',
})
self._sort_formats(formats)
return {
'id': video_id,
'formats': formats,
'title': title,
'thumbnail': data.get('jpg'),
'uploader': data.get('md_author'),
'uploader_id': str_or_none(data.get('author_id') or mv_data.get('authorId')),
'duration': int_or_none(data.get('duration') or mv_data.get('duration')),
'timestamp': timestamp,
'view_count': view_count,
'like_count': int_or_none(mv_data.get('likes')),
'comment_count': int_or_none(mv_data.get('commcount')),
'is_live': is_live,
}
class VKUserVideosIE(VKBaseIE):
IE_NAME = 'vk:uservideos'
IE_DESC = "VK - User's Videos"
_VALID_URL = r'https?://(?:(?:m|new)\.)?vk\.com/videos(?P<id>-?[0-9]+)(?!\?.*\bz=video)(?:[/?#&](?:.*?\bsection=(?P<section>\w+))?|$)'
_TEMPLATE_URL = 'https://vk.com/videos'
_TESTS = [{
'url': 'https://vk.com/videos-767561',
'info_dict': {
'id': '-767561_all',
},
'playlist_mincount': 1150,
}, {
'url': 'https://vk.com/videos-767561?section=uploaded',
'info_dict': {
'id': '-767561_uploaded',
},
'playlist_mincount': 425,
}, {
'url': 'http://vk.com/videos205387401',
'only_matching': True,
}, {
'url': 'http://vk.com/videos-77521',
'only_matching': True,
}, {
'url': 'http://vk.com/videos-97664626?section=all',
'only_matching': True,
}, {
'url': 'http://m.vk.com/videos205387401',
'only_matching': True,
}, {
'url': 'http://new.vk.com/videos205387401',
'only_matching': True,
}]
_PAGE_SIZE = 1000
_VIDEO = collections.namedtuple('Video', ['owner_id', 'id'])
def _fetch_page(self, page_id, section, page):
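        # Entries of the paginated 'load_videos_silent' payload start with
        # (owner_id, video_id), which is enough to build the watch URL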
l = self._download_payload('al_video', page_id, {
'act': 'load_videos_silent',
'offset': page * self._PAGE_SIZE,
'oid': page_id,
'section': section,
})[0][section]['list']
for video in l:
v = self._VIDEO._make(video[:2])
video_id = '%d_%d' % (v.owner_id, v.id)
yield self.url_result(
'http://vk.com/video' + video_id, VKIE.ie_key(), video_id)
def _real_extract(self, url):
page_id, section = re.match(self._VALID_URL, url).groups()
if not section:
section = 'all'
entries = OnDemandPagedList(
functools.partial(self._fetch_page, page_id, section),
self._PAGE_SIZE)
return self.playlist_result(entries, '%s_%s' % (page_id, section))
class VKWallPostIE(VKBaseIE):
IE_NAME = 'vk:wallpost'
_VALID_URL = r'https?://(?:(?:(?:(?:m|new)\.)?vk\.com/(?:[^?]+\?.*\bw=)?wall(?P<id>-?\d+_\d+)))'
_TESTS = [{
# public page URL, audio playlist
'url': 'https://vk.com/bs.official?w=wall-23538238_35',
'info_dict': {
'id': '-23538238_35',
'title': 'Black Shadow - Wall post -23538238_35',
'description': 'md5:3f84b9c4f9ef499731cf1ced9998cc0c',
},
'playlist': [{
'md5': '5ba93864ec5b85f7ce19a9af4af080f6',
'info_dict': {
'id': '135220665_111806521',
'ext': 'mp4',
'title': 'Black Shadow - Слепое Верование',
'duration': 370,
'uploader': 'Black Shadow',
'artist': 'Black Shadow',
'track': 'Слепое Верование',
},
}, {
'md5': '4cc7e804579122b17ea95af7834c9233',
'info_dict': {
'id': '135220665_111802303',
'ext': 'mp4',
'title': 'Black Shadow - Война - Негасимое Бездны Пламя!',
'duration': 423,
'uploader': 'Black Shadow',
'artist': 'Black Shadow',
'track': 'Война - Негасимое Бездны Пламя!',
},
}],
'params': {
'skip_download': True,
'usenetrc': True,
},
'skip': 'Requires vk account credentials',
}, {
# single YouTube embed, no leading -
'url': 'https://vk.com/wall85155021_6319',
'info_dict': {
'id': '85155021_6319',
'title': 'Сергей Горбунов - Wall post 85155021_6319',
},
'playlist_count': 1,
'params': {
'usenetrc': True,
},
'skip': 'Requires vk account credentials',
}, {
# wall page URL
'url': 'https://vk.com/wall-23538238_35',
'only_matching': True,
}, {
# mobile wall page URL
'url': 'https://m.vk.com/wall-23538238_35',
'only_matching': True,
}]
_BASE64_CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN0PQRSTUVWXYZO123456789+/='
_AUDIO = collections.namedtuple('Audio', ['id', 'owner_id', 'url', 'title', 'performer', 'duration', 'album_id', 'unk', 'author_link', 'lyrics', 'flags', 'context', 'extra', 'hashes', 'cover_url', 'ads'])
def _decode(self, enc):
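        # Decode vk's custom base64 variant (note the swapped 'O'/'0' in _BASE64_CHARS)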
dec = ''
e = n = 0
for c in enc:
r = self._BASE64_CHARS.index(c)
cond = n % 4
e = 64 * e + r if cond else r
n += 1
if cond:
dec += chr(255 & e >> (-2 * n & 6))
return dec
def _unmask_url(self, mask_url, vk_id):
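        # Masked audio URLs carry an '?extra=' blob of two encoded parts separated
        # by '#'; the second yields the permutation seed, which is XORed with the
        # numeric vk_id to unshuffle the first part back into a playable URL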
if 'audio_api_unavailable' in mask_url:
extra = mask_url.split('?extra=')[1].split('#')
func, base = self._decode(extra[1]).split(chr(11))
mask_url = list(self._decode(extra[0]))
url_len = len(mask_url)
indexes = [None] * url_len
index = int(base) ^ vk_id
for n in range(url_len - 1, -1, -1):
index = (url_len * (n + 1) ^ index + n) % url_len
indexes[n] = index
for n in range(1, url_len):
c = mask_url[n]
index = indexes[url_len - 1 - n]
mask_url[n] = mask_url[index]
mask_url[index] = c
mask_url = ''.join(mask_url)
return mask_url
def _real_extract(self, url):
post_id = self._match_id(url)
webpage = self._download_payload('wkview', post_id, {
'act': 'show',
'w': 'wall' + post_id,
})[1]
description = clean_html(get_element_by_class('wall_post_text', webpage))
uploader = clean_html(get_element_by_class('author', webpage))
entries = []
for audio in re.findall(r'data-audio="([^"]+)', webpage):
audio = self._parse_json(unescapeHTML(audio), post_id)
a = self._AUDIO._make(audio[:16])
if not a.url:
continue
title = unescapeHTML(a.title)
performer = unescapeHTML(a.performer)
entries.append({
'id': '%s_%s' % (a.owner_id, a.id),
'url': self._unmask_url(a.url, a.ads['vk_id']),
'title': '%s - %s' % (performer, title) if performer else title,
'thumbnails': [{'url': c_url} for c_url in a.cover_url.split(',')] if a.cover_url else None,
'duration': int_or_none(a.duration),
'uploader': uploader,
'artist': performer,
'track': title,
'ext': 'mp4',
'protocol': 'm3u8',
})
for video in re.finditer(
r'<a[^>]+href=(["\'])(?P<url>/video(?:-?[\d_]+).*?)\1', webpage):
entries.append(self.url_result(
compat_urlparse.urljoin(url, video.group('url')), VKIE.ie_key()))
title = 'Wall post %s' % post_id
return self.playlist_result(
orderedSet(entries), post_id,
'%s - %s' % (uploader, title) if uploader else title,
description)
|
unlicense
|
a3023af4e8f9b0a8a923381b8aa20562
| 36.626844
| 208
| 0.483007
| 3.491788
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/rtve.py
|
24
|
10066
|
# coding: utf-8
from __future__ import unicode_literals
import base64
import re
import time
from .common import InfoExtractor
from ..compat import (
compat_b64decode,
compat_struct_unpack,
)
from ..utils import (
determine_ext,
ExtractorError,
float_or_none,
remove_end,
remove_start,
sanitized_Request,
std_headers,
)
def _decrypt_url(png):
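    # The media URL is hidden in the tEXt chunk of a PNG thumbnail: an alphabet
    # section and a digit-encoded URL separated by '#'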
encrypted_data = compat_b64decode(png)
text_index = encrypted_data.find(b'tEXt')
text_chunk = encrypted_data[text_index - 4:]
length = compat_struct_unpack('!I', text_chunk[:4])[0]
# Use bytearray to get integers when iterating in both python 2.x and 3.x
data = bytearray(text_chunk[8:8 + length])
data = [chr(b) for b in data if b != 0]
hash_index = data.index('#')
alphabet_data = data[:hash_index]
url_data = data[hash_index + 1:]
if url_data[0] == 'H' and url_data[3] == '%':
# remove useless HQ%% at the start
url_data = url_data[4:]
alphabet = []
e = 0
d = 0
for l in alphabet_data:
if d == 0:
alphabet.append(l)
d = e = (e + 1) % 4
else:
d -= 1
url = ''
f = 0
e = 3
b = 1
for letter in url_data:
if f == 0:
l = int(letter) * 10
f = 1
else:
if e == 0:
l += int(letter)
url += alphabet[l]
e = (b + 3) % 4
f = 0
b += 1
else:
e -= 1
return url
class RTVEALaCartaIE(InfoExtractor):
IE_NAME = 'rtve.es:alacarta'
IE_DESC = 'RTVE a la carta'
_VALID_URL = r'https?://(?:www\.)?rtve\.es/(m/)?(alacarta/videos|filmoteca)/[^/]+/[^/]+/(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.rtve.es/alacarta/videos/balonmano/o-swiss-cup-masculina-final-espana-suecia/2491869/',
'md5': '1d49b7e1ca7a7502c56a4bf1b60f1b43',
'info_dict': {
'id': '2491869',
'ext': 'mp4',
'title': 'Balonmano - Swiss Cup masculina. Final: España-Suecia',
'duration': 5024.566,
},
}, {
'note': 'Live stream',
'url': 'http://www.rtve.es/alacarta/videos/television/24h-live/1694255/',
'info_dict': {
'id': '1694255',
'ext': 'flv',
'title': 'TODO',
},
'skip': 'The f4m manifest can\'t be used yet',
}, {
'url': 'http://www.rtve.es/alacarta/videos/servir-y-proteger/servir-proteger-capitulo-104/4236788/',
'md5': 'e55e162379ad587e9640eda4f7353c0f',
'info_dict': {
'id': '4236788',
'ext': 'mp4',
'title': 'Servir y proteger - Capítulo 104 ',
'duration': 3222.0,
},
'params': {
'skip_download': True, # requires ffmpeg
},
}, {
'url': 'http://www.rtve.es/m/alacarta/videos/cuentame-como-paso/cuentame-como-paso-t16-ultimo-minuto-nuestra-vida-capitulo-276/2969138/?media=tve',
'only_matching': True,
}, {
'url': 'http://www.rtve.es/filmoteca/no-do/not-1-introduccion-primer-noticiario-espanol/1465256/',
'only_matching': True,
}]
def _real_initialize(self):
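        # The ztnr 'manager' id is obtained by sending the base64-encoded
        # User-Agent to RTVE's odin/loki endpoint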
user_agent_b64 = base64.b64encode(std_headers['User-Agent'].encode('utf-8')).decode('utf-8')
manager_info = self._download_json(
'http://www.rtve.es/odin/loki/' + user_agent_b64,
None, 'Fetching manager info')
self._manager = manager_info['manager']
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
info = self._download_json(
'http://www.rtve.es/api/videos/%s/config/alacarta_videos.json' % video_id,
video_id)['page']['items'][0]
if info['state'] == 'DESPU':
raise ExtractorError('The video is no longer available', expected=True)
title = info['title']
png_url = 'http://www.rtve.es/ztnr/movil/thumbnail/%s/videos/%s.png' % (self._manager, video_id)
png_request = sanitized_Request(png_url)
png_request.add_header('Referer', url)
png = self._download_webpage(png_request, video_id, 'Downloading url information')
video_url = _decrypt_url(png)
ext = determine_ext(video_url)
formats = []
if not video_url.endswith('.f4m') and ext != 'm3u8':
if '?' not in video_url:
video_url = video_url.replace('resources/', 'auth/resources/')
video_url = video_url.replace('.net.rtve', '.multimedia.cdn.rtve')
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
video_url, video_id, ext='mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
elif ext == 'f4m':
formats.extend(self._extract_f4m_formats(
video_url, video_id, f4m_id='hds', fatal=False))
else:
formats.append({
'url': video_url,
})
self._sort_formats(formats)
subtitles = None
if info.get('sbtFile') is not None:
subtitles = self.extract_subtitles(video_id, info['sbtFile'])
return {
'id': video_id,
'title': title,
'formats': formats,
'thumbnail': info.get('image'),
'page_url': url,
'subtitles': subtitles,
'duration': float_or_none(info.get('duration'), scale=1000),
}
def _get_subtitles(self, video_id, sub_file):
subs = self._download_json(
sub_file + '.json', video_id,
'Downloading subtitles info')['page']['items']
return dict(
(s['lang'], [{'ext': 'vtt', 'url': s['src']}])
for s in subs)
class RTVEInfantilIE(InfoExtractor):
IE_NAME = 'rtve.es:infantil'
IE_DESC = 'RTVE infantil'
_VALID_URL = r'https?://(?:www\.)?rtve\.es/infantil/serie/(?P<show>[^/]*)/video/(?P<short_title>[^/]*)/(?P<id>[0-9]+)/'
_TESTS = [{
'url': 'http://www.rtve.es/infantil/serie/cleo/video/maneras-vivir/3040283/',
'md5': '915319587b33720b8e0357caaa6617e6',
'info_dict': {
'id': '3040283',
'ext': 'mp4',
'title': 'Maneras de vivir',
'thumbnail': 'http://www.rtve.es/resources/jpg/6/5/1426182947956.JPG',
'duration': 357.958,
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
info = self._download_json(
'http://www.rtve.es/api/videos/%s/config/alacarta_videos.json' % video_id,
video_id)['page']['items'][0]
webpage = self._download_webpage(url, video_id)
vidplayer_id = self._search_regex(
r' id="vidplayer([0-9]+)"', webpage, 'internal video ID')
png_url = 'http://www.rtve.es/ztnr/movil/thumbnail/default/videos/%s.png' % vidplayer_id
png = self._download_webpage(png_url, video_id, 'Downloading url information')
video_url = _decrypt_url(png)
return {
'id': video_id,
'ext': 'mp4',
'title': info['title'],
'url': video_url,
'thumbnail': info.get('image'),
'duration': float_or_none(info.get('duration'), scale=1000),
}
class RTVELiveIE(InfoExtractor):
IE_NAME = 'rtve.es:live'
IE_DESC = 'RTVE.es live streams'
_VALID_URL = r'https?://(?:www\.)?rtve\.es/directo/(?P<id>[a-zA-Z0-9-]+)'
_TESTS = [{
'url': 'http://www.rtve.es/directo/la-1/',
'info_dict': {
'id': 'la-1',
'ext': 'mp4',
'title': 're:^La 1 [0-9]{4}-[0-9]{2}-[0-9]{2}Z[0-9]{6}$',
},
'params': {
'skip_download': 'live stream',
}
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
start_time = time.gmtime()
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
title = remove_end(self._og_search_title(webpage), ' en directo en RTVE.es')
title = remove_start(title, 'Estoy viendo ')
title += ' ' + time.strftime('%Y-%m-%dZ%H%M%S', start_time)
vidplayer_id = self._search_regex(
(r'playerId=player([0-9]+)',
r'class=["\'].*?\blive_mod\b.*?["\'][^>]+data-assetid=["\'](\d+)',
r'data-id=["\'](\d+)'),
webpage, 'internal video ID')
png_url = 'http://www.rtve.es/ztnr/movil/thumbnail/amonet/videos/%s.png' % vidplayer_id
png = self._download_webpage(png_url, video_id, 'Downloading url information')
m3u8_url = _decrypt_url(png)
formats = self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4')
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'formats': formats,
'is_live': True,
}
class RTVETelevisionIE(InfoExtractor):
IE_NAME = 'rtve.es:television'
_VALID_URL = r'https?://(?:www\.)?rtve\.es/television/[^/]+/[^/]+/(?P<id>\d+).shtml'
_TEST = {
'url': 'http://www.rtve.es/television/20160628/revolucion-del-movil/1364141.shtml',
'info_dict': {
'id': '3069778',
'ext': 'mp4',
'title': 'Documentos TV - La revolución del móvil',
'duration': 3496.948,
},
'params': {
'skip_download': True,
},
}
def _real_extract(self, url):
page_id = self._match_id(url)
webpage = self._download_webpage(url, page_id)
alacarta_url = self._search_regex(
r'data-location="alacarta_videos"[^<]+url":"(http://www\.rtve\.es/alacarta.+?)&',
webpage, 'alacarta url', default=None)
if alacarta_url is None:
raise ExtractorError(
'The webpage doesn\'t contain any video', expected=True)
return self.url_result(alacarta_url, ie=RTVEALaCartaIE.ie_key())
|
unlicense
|
8b6eed33e71a5c678908d54d9cc69acc
| 33.458904
| 155
| 0.528722
| 3.180152
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/vidlii.py
|
28
|
4530
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
float_or_none,
get_element_by_id,
int_or_none,
strip_or_none,
unified_strdate,
urljoin,
)
class VidLiiIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?vidlii\.com/(?:watch|embed)\?.*?\bv=(?P<id>[0-9A-Za-z_-]{11})'
_TESTS = [{
'url': 'https://www.vidlii.com/watch?v=tJluaH4BJ3v',
'md5': '9bf7d1e005dfa909b6efb0a1ff5175e2',
'info_dict': {
'id': 'tJluaH4BJ3v',
'ext': 'mp4',
'title': 'Vidlii is against me',
'description': 'md5:fa3f119287a2bfb922623b52b1856145',
'thumbnail': 're:https://.*.jpg',
'uploader': 'APPle5auc31995',
'uploader_url': 'https://www.vidlii.com/user/APPle5auc31995',
'upload_date': '20171107',
'duration': 212,
'view_count': int,
'comment_count': int,
'average_rating': float,
'categories': ['News & Politics'],
'tags': ['Vidlii', 'Jan', 'Videogames'],
}
}, {
'url': 'https://www.vidlii.com/embed?v=tJluaH4BJ3v&a=0',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
'https://www.vidlii.com/watch?v=%s' % video_id, video_id)
video_url = self._search_regex(
r'src\s*:\s*(["\'])(?P<url>(?:https?://)?(?:(?!\1).)+)\1', webpage,
'video url', group='url')
title = self._search_regex(
(r'<h1>([^<]+)</h1>', r'<title>([^<]+) - VidLii<'), webpage,
'title')
description = self._html_search_meta(
('description', 'twitter:description'), webpage,
default=None) or strip_or_none(
get_element_by_id('des_text', webpage))
thumbnail = self._html_search_meta(
'twitter:image', webpage, default=None)
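        # Fall back to the thumbnail referenced in the player config when the
        # twitter:image meta tag is missing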
if not thumbnail:
thumbnail_path = self._search_regex(
r'img\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage,
'thumbnail', fatal=False, group='url')
if thumbnail_path:
thumbnail = urljoin(url, thumbnail_path)
uploader = self._search_regex(
r'<div[^>]+class=["\']wt_person[^>]+>\s*<a[^>]+\bhref=["\']/user/[^>]+>([^<]+)',
webpage, 'uploader', fatal=False)
uploader_url = 'https://www.vidlii.com/user/%s' % uploader if uploader else None
upload_date = unified_strdate(self._html_search_meta(
'datePublished', webpage, default=None) or self._search_regex(
r'<date>([^<]+)', webpage, 'upload date', fatal=False))
duration = int_or_none(self._html_search_meta(
'video:duration', webpage, 'duration',
default=None) or self._search_regex(
r'duration\s*:\s*(\d+)', webpage, 'duration', fatal=False))
view_count = int_or_none(self._search_regex(
(r'<strong>(\d+)</strong> views',
r'Views\s*:\s*<strong>(\d+)</strong>'),
webpage, 'view count', fatal=False))
comment_count = int_or_none(self._search_regex(
(r'<span[^>]+id=["\']cmt_num[^>]+>(\d+)',
r'Comments\s*:\s*<strong>(\d+)'),
webpage, 'comment count', fatal=False))
average_rating = float_or_none(self._search_regex(
r'rating\s*:\s*([\d.]+)', webpage, 'average rating', fatal=False))
category = self._html_search_regex(
r'<div>Category\s*:\s*</div>\s*<div>\s*<a[^>]+>([^<]+)', webpage,
'category', fatal=False)
categories = [category] if category else None
tags = [
strip_or_none(tag)
for tag in re.findall(
r'<a[^>]+\bhref=["\']/results\?.*?q=[^>]*>([^<]+)',
webpage) if strip_or_none(tag)
] or None
return {
'id': video_id,
'url': video_url,
'title': title,
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
'uploader_url': uploader_url,
'upload_date': upload_date,
'duration': duration,
'view_count': view_count,
'comment_count': comment_count,
'average_rating': average_rating,
'categories': categories,
'tags': tags,
}
|
unlicense
|
6dda50ce77ac18fcc1a948fbb334edbf
| 35.24
| 100
| 0.505519
| 3.450114
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/sixplay.py
|
15
|
5252
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_str,
compat_urllib_parse_urlparse,
)
from ..utils import (
determine_ext,
int_or_none,
try_get,
qualities,
)
class SixPlayIE(InfoExtractor):
IE_NAME = '6play'
_VALID_URL = r'(?:6play:|https?://(?:www\.)?(?P<domain>6play\.fr|rtlplay\.be|play\.rtl\.hr|rtlmost\.hu)/.+?-c_)(?P<id>[0-9]+)'
_TESTS = [{
'url': 'https://www.6play.fr/minute-par-minute-p_9533/le-but-qui-a-marque-lhistoire-du-football-francais-c_12041051',
'md5': '31fcd112637baa0c2ab92c4fcd8baf27',
'info_dict': {
'id': '12041051',
'ext': 'mp4',
'title': 'Le but qui a marqué l\'histoire du football français !',
'description': 'md5:b59e7e841d646ef1eb42a7868eb6a851',
},
}, {
'url': 'https://www.rtlplay.be/rtl-info-13h-p_8551/les-titres-du-rtlinfo-13h-c_12045869',
'only_matching': True,
}, {
'url': 'https://play.rtl.hr/pj-masks-p_9455/epizoda-34-sezona-1-catboyevo-cudo-na-dva-kotaca-c_11984989',
'only_matching': True,
}, {
'url': 'https://www.rtlmost.hu/megtorve-p_14167/megtorve-6-resz-c_12397787',
'only_matching': True,
}]
def _real_extract(self, url):
domain, video_id = re.search(self._VALID_URL, url).groups()
service, consumer_name = {
'6play.fr': ('6play', 'm6web'),
'rtlplay.be': ('rtlbe_rtl_play', 'rtlbe'),
'play.rtl.hr': ('rtlhr_rtl_play', 'rtlhr'),
'rtlmost.hu': ('rtlhu_rtl_most', 'rtlhu'),
}.get(domain, ('6play', 'm6web'))
data = self._download_json(
'https://pc.middleware.6play.fr/6play/v2/platforms/m6group_web/services/%s/videos/clip_%s' % (service, video_id),
video_id, headers={
'x-customer-name': consumer_name
}, query={
'csa': 5,
'with': 'clips',
})
clip_data = data['clips'][0]
title = clip_data['title']
urls = []
quality_key = qualities(['lq', 'sd', 'hq', 'hd'])
formats = []
subtitles = {}
assets = clip_data.get('assets') or []
for asset in assets:
asset_url = asset.get('full_physical_path')
protocol = asset.get('protocol')
if not asset_url or ((protocol == 'primetime' or asset.get('type') == 'usp_hlsfp_h264') and not ('_drmnp.ism/' in asset_url or '_unpnp.ism/' in asset_url)) or asset_url in urls:
continue
urls.append(asset_url)
container = asset.get('video_container')
ext = determine_ext(asset_url)
if protocol == 'http_subtitle' or ext == 'vtt':
subtitles.setdefault('fr', []).append({'url': asset_url})
continue
if container == 'm3u8' or ext == 'm3u8':
if protocol == 'usp':
if compat_parse_qs(compat_urllib_parse_urlparse(asset_url).query).get('token', [None])[0]:
urlh = self._request_webpage(
asset_url, video_id, fatal=False,
headers=self.geo_verification_headers())
if not urlh:
continue
asset_url = urlh.geturl()
asset_url = asset_url.replace('_drmnp.ism/', '_unpnp.ism/')
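                    # Rewrite the _sd1 path segment to request higher renditions
                    # (highest first) and stop once HLS formats are returned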
for i in range(3, 0, -1):
                        asset_url = asset_url.replace('_sd1/', '_sd%d/' % i)
m3u8_formats = self._extract_m3u8_formats(
asset_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False)
formats.extend(m3u8_formats)
formats.extend(self._extract_mpd_formats(
asset_url.replace('.m3u8', '.mpd'),
video_id, mpd_id='dash', fatal=False))
if m3u8_formats:
break
else:
formats.extend(self._extract_m3u8_formats(
asset_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))
elif container == 'mp4' or ext == 'mp4':
quality = asset.get('video_quality')
formats.append({
'url': asset_url,
'format_id': quality,
'quality': quality_key(quality),
'ext': ext,
})
self._sort_formats(formats)
def get(getter):
for src in (data, clip_data):
v = try_get(src, getter, compat_str)
if v:
return v
return {
'id': video_id,
'title': title,
'description': get(lambda x: x['description']),
'duration': int_or_none(clip_data.get('duration')),
'series': get(lambda x: x['program']['title']),
'formats': formats,
'subtitles': subtitles,
}
|
unlicense
|
7d827b3855e7b89d34c5d0e06351e60a
| 39.697674
| 189
| 0.488762
| 3.502335
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/linkedin.py
|
17
|
6753
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
float_or_none,
int_or_none,
urlencode_postdata,
urljoin,
)
class LinkedInLearningBaseIE(InfoExtractor):
_NETRC_MACHINE = 'linkedin'
_LOGIN_URL = 'https://www.linkedin.com/uas/login?trk=learning'
def _call_api(self, course_slug, fields, video_slug=None, resolution=None):
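        # Query detailedCourses by slug; adding a videoSlug narrows the response
        # to a single video at the requested resolution. The JSESSIONID cookie
        # value doubles as the CSRF token.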
query = {
'courseSlug': course_slug,
'fields': fields,
'q': 'slugs',
}
sub = ''
if video_slug:
query.update({
'videoSlug': video_slug,
'resolution': '_%s' % resolution,
})
sub = ' %dp' % resolution
api_url = 'https://www.linkedin.com/learning-api/detailedCourses'
return self._download_json(
api_url, video_slug, 'Downloading%s JSON metadata' % sub, headers={
'Csrf-Token': self._get_cookies(api_url)['JSESSIONID'].value,
}, query=query)['elements'][0]
def _get_urn_id(self, video_data):
urn = video_data.get('urn')
if urn:
mobj = re.search(r'urn:li:lyndaCourse:\d+,(\d+)', urn)
if mobj:
return mobj.group(1)
def _get_video_id(self, video_data, course_slug, video_slug):
return self._get_urn_id(video_data) or '%s/%s' % (course_slug, video_slug)
def _real_initialize(self):
email, password = self._get_login_info()
if email is None:
return
login_page = self._download_webpage(
self._LOGIN_URL, None, 'Downloading login page')
action_url = urljoin(self._LOGIN_URL, self._search_regex(
r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page, 'post url',
default='https://www.linkedin.com/uas/login-submit', group='url'))
data = self._hidden_inputs(login_page)
data.update({
'session_key': email,
'session_password': password,
})
login_submit_page = self._download_webpage(
action_url, None, 'Logging in',
data=urlencode_postdata(data))
error = self._search_regex(
r'<span[^>]+class="error"[^>]*>\s*(.+?)\s*</span>',
login_submit_page, 'error', default=None)
if error:
raise ExtractorError(error, expected=True)
class LinkedInLearningIE(LinkedInLearningBaseIE):
IE_NAME = 'linkedin:learning'
_VALID_URL = r'https?://(?:www\.)?linkedin\.com/learning/(?P<course_slug>[^/]+)/(?P<id>[^/?#]+)'
_TEST = {
'url': 'https://www.linkedin.com/learning/programming-foundations-fundamentals/welcome?autoplay=true',
'md5': 'a1d74422ff0d5e66a792deb996693167',
'info_dict': {
'id': '90426',
'ext': 'mp4',
'title': 'Welcome',
'timestamp': 1430396150.82,
'upload_date': '20150430',
},
}
def _real_extract(self, url):
course_slug, video_slug = re.match(self._VALID_URL, url).groups()
video_data = None
formats = []
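        # Query the API once per resolution to collect progressive MP4 variants;
        # the metadata below is taken from the last response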
for width, height in ((640, 360), (960, 540), (1280, 720)):
video_data = self._call_api(
course_slug, 'selectedVideo', video_slug, height)['selectedVideo']
video_url_data = video_data.get('url') or {}
progressive_url = video_url_data.get('progressiveUrl')
if progressive_url:
formats.append({
'format_id': 'progressive-%dp' % height,
'url': progressive_url,
'height': height,
'width': width,
'source_preference': 1,
})
title = video_data['title']
audio_url = video_data.get('audio', {}).get('progressiveUrl')
if audio_url:
formats.append({
'abr': 64,
'ext': 'm4a',
'format_id': 'audio',
'url': audio_url,
'vcodec': 'none',
})
streaming_url = video_url_data.get('streamingUrl')
if streaming_url:
formats.extend(self._extract_m3u8_formats(
streaming_url, video_slug, 'mp4',
'm3u8_native', m3u8_id='hls', fatal=False))
self._sort_formats(formats, ('width', 'height', 'source_preference', 'tbr', 'abr'))
return {
'id': self._get_video_id(video_data, course_slug, video_slug),
'title': title,
'formats': formats,
'thumbnail': video_data.get('defaultThumbnail'),
'timestamp': float_or_none(video_data.get('publishedOn'), 1000),
'duration': int_or_none(video_data.get('durationInSeconds')),
}
class LinkedInLearningCourseIE(LinkedInLearningBaseIE):
IE_NAME = 'linkedin:learning:course'
_VALID_URL = r'https?://(?:www\.)?linkedin\.com/learning/(?P<id>[^/?#]+)'
_TEST = {
'url': 'https://www.linkedin.com/learning/programming-foundations-fundamentals',
'info_dict': {
'id': 'programming-foundations-fundamentals',
'title': 'Programming Foundations: Fundamentals',
'description': 'md5:76e580b017694eb89dc8e8923fff5c86',
},
'playlist_mincount': 61,
}
@classmethod
def suitable(cls, url):
return False if LinkedInLearningIE.suitable(url) else super(LinkedInLearningCourseIE, cls).suitable(url)
def _real_extract(self, url):
course_slug = self._match_id(url)
course_data = self._call_api(course_slug, 'chapters,description,title')
entries = []
for chapter_number, chapter in enumerate(course_data.get('chapters', []), 1):
chapter_title = chapter.get('title')
chapter_id = self._get_urn_id(chapter)
for video in chapter.get('videos', []):
video_slug = video.get('slug')
if not video_slug:
continue
entries.append({
'_type': 'url_transparent',
'id': self._get_video_id(video, course_slug, video_slug),
'title': video.get('title'),
'url': 'https://www.linkedin.com/learning/%s/%s' % (course_slug, video_slug),
'chapter': chapter_title,
'chapter_number': chapter_number,
'chapter_id': chapter_id,
'ie_key': LinkedInLearningIE.ie_key(),
})
return self.playlist_result(
entries, course_slug,
course_data.get('title'),
course_data.get('description'))
|
unlicense
|
0cdcf05c0d5adabfe642761ad1b6b410
| 36.104396
| 112
| 0.535614
| 3.733002
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/teamcoco.py
|
13
|
7310
|
# coding: utf-8
from __future__ import unicode_literals
import json
from .turner import TurnerBaseIE
from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
mimetype2ext,
parse_duration,
parse_iso8601,
qualities,
)
class TeamcocoIE(TurnerBaseIE):
_VALID_URL = r'https?://(?:\w+\.)?teamcoco\.com/(?P<id>([^/]+/)*[^/?#]+)'
_TESTS = [
{
'url': 'http://teamcoco.com/video/mary-kay-remote',
'md5': '55d532f81992f5c92046ad02fec34d7d',
'info_dict': {
'id': '80187',
'ext': 'mp4',
'title': 'Conan Becomes A Mary Kay Beauty Consultant',
'description': 'Mary Kay is perhaps the most trusted name in female beauty, so of course Conan is a natural choice to sell their products.',
'duration': 495.0,
'upload_date': '20140402',
'timestamp': 1396407600,
}
}, {
'url': 'http://teamcoco.com/video/louis-ck-interview-george-w-bush',
'md5': 'cde9ba0fa3506f5f017ce11ead928f9a',
'info_dict': {
'id': '19705',
'ext': 'mp4',
'description': 'Louis C.K. got starstruck by George W. Bush, so what? Part one.',
'title': 'Louis C.K. Interview Pt. 1 11/3/11',
'duration': 288,
'upload_date': '20111104',
'timestamp': 1320405840,
}
}, {
'url': 'http://teamcoco.com/video/timothy-olyphant-drinking-whiskey',
'info_dict': {
'id': '88748',
'ext': 'mp4',
'title': 'Timothy Olyphant Raises A Toast To “Justified”',
'description': 'md5:15501f23f020e793aeca761205e42c24',
'upload_date': '20150415',
'timestamp': 1429088400,
},
'params': {
'skip_download': True, # m3u8 downloads
}
}, {
'url': 'http://teamcoco.com/video/full-episode-mon-6-1-joel-mchale-jake-tapper-and-musical-guest-courtney-barnett?playlist=x;eyJ0eXBlIjoidGFnIiwiaWQiOjl9',
'info_dict': {
'id': '89341',
'ext': 'mp4',
'title': 'Full Episode - Mon. 6/1 - Joel McHale, Jake Tapper, And Musical Guest Courtney Barnett',
'description': 'Guests: Joel McHale, Jake Tapper, And Musical Guest Courtney Barnett',
},
'params': {
'skip_download': True, # m3u8 downloads
},
'skip': 'This video is no longer available.',
}, {
'url': 'http://teamcoco.com/video/the-conan-audiencey-awards-for-04/25/18',
'only_matching': True,
}, {
'url': 'http://teamcoco.com/italy/conan-jordan-schlansky-hit-the-streets-of-florence',
'only_matching': True,
}, {
'url': 'http://teamcoco.com/haiti/conan-s-haitian-history-lesson',
'only_matching': True,
}, {
'url': 'http://teamcoco.com/israel/conan-hits-the-streets-beaches-of-tel-aviv',
'only_matching': True,
}, {
'url': 'https://conan25.teamcoco.com/video/ice-cube-kevin-hart-conan-share-lyft',
'only_matching': True,
}
]
_RECORD_TEMPL = '''id
title
teaser
publishOn
thumb {
preview
}
tags {
name
}
duration
turnerMediaId
turnerMediaAuthToken'''
def _graphql_call(self, query_template, object_type, object_id):
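        # POST a GraphQL query built from the template to teamcoco.com and
        # return the matching 'findSlug' / 'findRecord' object.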
find_object = 'find' + object_type
return self._download_json(
'https://teamcoco.com/graphql', object_id, data=json.dumps({
'query': query_template % (find_object, object_id)
}).encode(), headers={
'Content-Type': 'application/json',
})['data'][find_object]
def _real_extract(self, url):
display_id = self._match_id(url)
response = self._graphql_call('''{
%%s(slug: "%%s") {
... on RecordSlug {
record {
%s
}
}
... on PageSlug {
child {
id
}
}
... on NotFoundSlug {
status
}
}
}''' % self._RECORD_TEMPL, 'Slug', display_id)
if response.get('status'):
raise ExtractorError('This video is no longer available.', expected=True)
child = response.get('child')
if child:
record = self._graphql_call('''{
%%s(id: "%%s") {
... on Video {
%s
}
}
}''' % self._RECORD_TEMPL, 'Record', child['id'])
else:
record = response['record']
video_id = record['id']
info = {
'id': video_id,
'display_id': display_id,
'title': record['title'],
'thumbnail': record.get('thumb', {}).get('preview'),
'description': record.get('teaser'),
'duration': parse_duration(record.get('duration')),
'timestamp': parse_iso8601(record.get('publishOn')),
}
media_id = record.get('turnerMediaId')
if media_id:
self._initialize_geo_bypass({
'countries': ['US'],
})
info.update(self._extract_ngtv_info(media_id, {
'accessToken': record['turnerMediaAuthToken'],
'accessTokenType': 'jws',
}))
else:
video_sources = self._download_json(
'https://teamcoco.com/_truman/d/' + video_id,
video_id)['meta']['src']
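            # 'src' is either a dict keyed by quality label or a plain list
            # of source dicts; use the dict's values in the former case.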
if isinstance(video_sources, dict):
video_sources = video_sources.values()
formats = []
get_quality = qualities(['low', 'sd', 'hd', 'uhd'])
for src in video_sources:
if not isinstance(src, dict):
continue
src_url = src.get('src')
if not src_url:
continue
format_id = src.get('label')
ext = determine_ext(src_url, mimetype2ext(src.get('type')))
if format_id == 'hls' or ext == 'm3u8':
# compat_urllib_parse.urljoin does not work here
if src_url.startswith('/'):
src_url = 'http://ht.cdn.turner.com/tbs/big/teamcoco' + src_url
formats.extend(self._extract_m3u8_formats(
src_url, video_id, 'mp4', m3u8_id=format_id, fatal=False))
else:
if src_url.startswith('/mp4:protected/'):
# TODO Correct extraction for these files
continue
tbr = int_or_none(self._search_regex(
r'(\d+)k\.mp4', src_url, 'tbr', default=None))
formats.append({
'url': src_url,
'ext': ext,
'tbr': tbr,
'format_id': format_id,
'quality': get_quality(format_id),
})
self._sort_formats(formats)
info['formats'] = formats
return info
|
unlicense
|
0890804245e0889f9407872519f4ad24
| 34.639024
| 167
| 0.482206
| 3.658488
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/washingtonpost.py
|
5
|
5257
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class WashingtonPostIE(InfoExtractor):
IE_NAME = 'washingtonpost'
_VALID_URL = r'(?:washingtonpost:|https?://(?:www\.)?washingtonpost\.com/(?:video|posttv)/(?:[^/]+/)*)(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
_EMBED_URL = r'https?://(?:www\.)?washingtonpost\.com/video/c/embed/[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}'
_TESTS = [{
'url': 'https://www.washingtonpost.com/video/c/video/480ba4ee-1ec7-11e6-82c2-a7dcb313287d',
'md5': '6f537e1334b714eb15f9563bd4b9cdfa',
'info_dict': {
'id': '480ba4ee-1ec7-11e6-82c2-a7dcb313287d',
'ext': 'mp4',
'title': 'Egypt finds belongings, debris from plane crash',
'description': 'md5:a17ceee432f215a5371388c1f680bd86',
'upload_date': '20160520',
'timestamp': 1463775187,
},
}, {
'url': 'https://www.washingtonpost.com/video/world/egypt-finds-belongings-debris-from-plane-crash/2016/05/20/480ba4ee-1ec7-11e6-82c2-a7dcb313287d_video.html',
'only_matching': True,
}, {
'url': 'https://www.washingtonpost.com/posttv/world/iraq-to-track-down-antiquities-after-islamic-state-museum-rampage/2015/02/28/7c57e916-bf86-11e4-9dfb-03366e719af8_video.html',
'only_matching': True,
}]
@classmethod
def _extract_urls(cls, webpage):
return re.findall(
r'<iframe[^>]+\bsrc=["\'](%s)' % cls._EMBED_URL, webpage)
def _real_extract(self, url):
video_id = self._match_id(url)
return self.url_result(
'arcpublishing:wapo:' + video_id, 'ArcPublishing', video_id)
class WashingtonPostArticleIE(InfoExtractor):
IE_NAME = 'washingtonpost:article'
_VALID_URL = r'https?://(?:www\.)?washingtonpost\.com/(?:[^/]+/)*(?P<id>[^/?#]+)'
_TESTS = [{
'url': 'http://www.washingtonpost.com/sf/national/2014/03/22/sinkhole-of-bureaucracy/',
'info_dict': {
'id': 'sinkhole-of-bureaucracy',
'title': 'Sinkhole of bureaucracy',
},
'playlist': [{
'md5': 'b9be794ceb56c7267d410a13f99d801a',
'info_dict': {
'id': 'fc433c38-b146-11e3-b8b3-44b1d1cd4c1f',
'ext': 'mp4',
'title': 'Breaking Points: The Paper Mine',
'duration': 1290,
'description': 'Overly complicated paper pushing is nothing new to government bureaucracy. But the way federal retirement applications are filed may be the most outdated. David Fahrenthold explains.',
'timestamp': 1395440416,
'upload_date': '20140321',
},
}, {
'md5': '1fff6a689d8770966df78c8cb6c8c17c',
'info_dict': {
'id': '41255e28-b14a-11e3-b8b3-44b1d1cd4c1f',
'ext': 'mp4',
'title': 'The town bureaucracy sustains',
'description': 'Underneath the friendly town of Boyers is a sea of government paperwork. In a disused limestone mine, hundreds of locals now track, file and process retirement applications for the federal government. We set out to find out what it\'s like to do paperwork 230 feet underground.',
'duration': 2220,
'timestamp': 1395441819,
'upload_date': '20140321',
},
}],
}, {
'url': 'http://www.washingtonpost.com/blogs/wonkblog/wp/2014/12/31/one-airline-figured-out-how-to-make-sure-its-airplanes-never-disappear/',
'info_dict': {
'id': 'one-airline-figured-out-how-to-make-sure-its-airplanes-never-disappear',
'title': 'One airline figured out how to make sure its airplanes never disappear',
},
'playlist': [{
'md5': 'a7c1b5634ba5e57a6a82cdffa5b1e0d0',
'info_dict': {
'id': '0e4bb54c-9065-11e4-a66f-0ca5037a597d',
'ext': 'mp4',
'description': 'Washington Post transportation reporter Ashley Halsey III explains why a plane\'s black box needs to be recovered from a crash site instead of having its information streamed in real time throughout the flight.',
'upload_date': '20141230',
'timestamp': 1419972442,
'title': 'Why black boxes don’t transmit data in real time',
}
}]
}]
@classmethod
def suitable(cls, url):
return False if WashingtonPostIE.suitable(url) else super(WashingtonPostArticleIE, cls).suitable(url)
def _real_extract(self, url):
page_id = self._match_id(url)
webpage = self._download_webpage(url, page_id)
title = self._og_search_title(webpage)
uuids = re.findall(r'''(?x)
(?:
<div\s+class="posttv-video-embed[^>]*?data-uuid=|
data-video-uuid=
)"([^"]+)"''', webpage)
entries = [self.url_result('washingtonpost:%s' % uuid, 'WashingtonPost', uuid) for uuid in uuids]
return {
'_type': 'playlist',
'entries': entries,
'id': page_id,
'title': title,
}
|
unlicense
|
f09d5f6b5134c16bd25703467f2faab4
| 44.301724
| 311
| 0.577165
| 3.092996
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/spreaker.py
|
7
|
6030
|
# coding: utf-8
from __future__ import unicode_literals
import itertools
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
float_or_none,
int_or_none,
str_or_none,
try_get,
unified_timestamp,
url_or_none,
)
def _extract_episode(data, episode_id=None):
title = data['title']
download_url = data['download_url']
series = try_get(data, lambda x: x['show']['title'], compat_str)
uploader = try_get(data, lambda x: x['author']['fullname'], compat_str)
thumbnails = []
for image in ('image_original', 'image_medium', 'image'):
image_url = url_or_none(data.get('%s_url' % image))
if image_url:
thumbnails.append({'url': image_url})
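    # Counters are exposed either as top-level '<key>s_count' fields or
    # nested under stats['<key>s']; durations are given in milliseconds.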
def stats(key):
return int_or_none(try_get(
data,
(lambda x: x['%ss_count' % key],
lambda x: x['stats']['%ss' % key])))
def duration(key):
return float_or_none(data.get(key), scale=1000)
return {
'id': compat_str(episode_id or data['episode_id']),
'url': download_url,
'display_id': data.get('permalink'),
'title': title,
'description': data.get('description'),
'timestamp': unified_timestamp(data.get('published_at')),
'uploader': uploader,
'uploader_id': str_or_none(data.get('author_id')),
'creator': uploader,
'duration': duration('duration') or duration('length'),
'view_count': stats('play'),
'like_count': stats('like'),
'comment_count': stats('message'),
'format': 'MPEG Layer 3',
'format_id': 'mp3',
'container': 'mp3',
'ext': 'mp3',
'thumbnails': thumbnails,
'series': series,
'extractor_key': SpreakerIE.ie_key(),
}
class SpreakerIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://
api\.spreaker\.com/
(?:
(?:download/)?episode|
v2/episodes
)/
(?P<id>\d+)
'''
_TESTS = [{
'url': 'https://api.spreaker.com/episode/12534508',
'info_dict': {
'id': '12534508',
'display_id': 'swm-ep15-how-to-market-your-music-part-2',
'ext': 'mp3',
'title': 'EP:15 | Music Marketing (Likes) - Part 2',
'description': 'md5:0588c43e27be46423e183076fa071177',
'timestamp': 1502250336,
'upload_date': '20170809',
'uploader': 'SWM',
'uploader_id': '9780658',
'duration': 1063.42,
'view_count': int,
'like_count': int,
'comment_count': int,
'series': 'Success With Music (SWM)',
},
}, {
'url': 'https://api.spreaker.com/download/episode/12534508/swm_ep15_how_to_market_your_music_part_2.mp3',
'only_matching': True,
}, {
'url': 'https://api.spreaker.com/v2/episodes/12534508?export=episode_segments',
'only_matching': True,
}]
def _real_extract(self, url):
episode_id = self._match_id(url)
data = self._download_json(
'https://api.spreaker.com/v2/episodes/%s' % episode_id,
episode_id)['response']['episode']
return _extract_episode(data, episode_id)
class SpreakerPageIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?spreaker\.com/user/[^/]+/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://www.spreaker.com/user/9780658/swm-ep15-how-to-market-your-music-part-2',
'only_matching': True,
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
episode_id = self._search_regex(
(r'data-episode_id=["\'](?P<id>\d+)',
r'episode_id\s*:\s*(?P<id>\d+)'), webpage, 'episode id')
return self.url_result(
'https://api.spreaker.com/episode/%s' % episode_id,
ie=SpreakerIE.ie_key(), video_id=episode_id)
class SpreakerShowIE(InfoExtractor):
_VALID_URL = r'https?://api\.spreaker\.com/show/(?P<id>\d+)'
_TESTS = [{
'url': 'https://api.spreaker.com/show/4652058',
'info_dict': {
'id': '4652058',
},
'playlist_mincount': 118,
}]
def _entries(self, show_id):
for page_num in itertools.count(1):
episodes = self._download_json(
'https://api.spreaker.com/show/%s/episodes' % show_id,
show_id, note='Downloading JSON page %d' % page_num, query={
'page': page_num,
'max_per_page': 100,
})
pager = try_get(episodes, lambda x: x['response']['pager'], dict)
if not pager:
break
results = pager.get('results')
if not results or not isinstance(results, list):
break
for result in results:
if not isinstance(result, dict):
continue
yield _extract_episode(result)
if page_num == pager.get('last_page'):
break
def _real_extract(self, url):
show_id = self._match_id(url)
return self.playlist_result(self._entries(show_id), playlist_id=show_id)
class SpreakerShowPageIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?spreaker\.com/show/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://www.spreaker.com/show/success-with-music',
'only_matching': True,
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
show_id = self._search_regex(
r'show_id\s*:\s*(?P<id>\d+)', webpage, 'show id')
return self.url_result(
'https://api.spreaker.com/show/%s' % show_id,
ie=SpreakerShowIE.ie_key(), video_id=show_id)
|
unlicense
|
c41f8f78c553ba275034e12a4afe653f
| 33.261364
| 113
| 0.523715
| 3.489583
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/hketv.py
|
15
|
6965
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
clean_html,
ExtractorError,
int_or_none,
merge_dicts,
parse_count,
str_or_none,
try_get,
unified_strdate,
urlencode_postdata,
urljoin,
)
class HKETVIE(InfoExtractor):
IE_NAME = 'hketv'
IE_DESC = '香港教育局教育電視 (HKETV) Educational Television, Hong Kong Educational Bureau'
_GEO_BYPASS = False
_GEO_COUNTRIES = ['HK']
_VALID_URL = r'https?://(?:www\.)?hkedcity\.net/etv/resource/(?P<id>[0-9]+)'
_TESTS = [{
'url': 'https://www.hkedcity.net/etv/resource/2932360618',
'md5': 'f193712f5f7abb208ddef3c5ea6ed0b7',
'info_dict': {
'id': '2932360618',
'ext': 'mp4',
'title': '喜閱一生(共享閱讀樂) (中、英文字幕可供選擇)',
'description': 'md5:d5286d05219ef50e0613311cbe96e560',
'upload_date': '20181024',
'duration': 900,
'subtitles': 'count:2',
},
'skip': 'Geo restricted to HK',
}, {
'url': 'https://www.hkedcity.net/etv/resource/972641418',
'md5': '1ed494c1c6cf7866a8290edad9b07dc9',
'info_dict': {
'id': '972641418',
'ext': 'mp4',
'title': '衣冠楚楚 (天使系列之一)',
'description': 'md5:10bb3d659421e74f58e5db5691627b0f',
'upload_date': '20070109',
'duration': 907,
'subtitles': {},
},
'params': {
'geo_verification_proxy': '<HK proxy here>',
},
'skip': 'Geo restricted to HK',
}]
_CC_LANGS = {
'中文(繁體中文)': 'zh-Hant',
'中文(简体中文)': 'zh-Hans',
'English': 'en',
'Bahasa Indonesia': 'id',
'\u0939\u093f\u0928\u094d\u0926\u0940': 'hi',
'\u0928\u0947\u092a\u093e\u0932\u0940': 'ne',
'Tagalog': 'tl',
'\u0e44\u0e17\u0e22': 'th',
'\u0627\u0631\u062f\u0648': 'ur',
}
_FORMAT_HEIGHTS = {
'SD': 360,
'HD': 720,
}
_APPS_BASE_URL = 'https://apps.hkedcity.net'
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = (
self._html_search_meta(
('ed_title', 'search.ed_title'), webpage, default=None)
or self._search_regex(
r'data-favorite_title_(?:eng|chi)=(["\'])(?P<id>(?:(?!\1).)+)\1',
                webpage, 'title', default=None, group='id')
or self._html_search_regex(
r'<h1>([^<]+)</h1>', webpage, 'title', default=None)
or self._og_search_title(webpage)
)
file_id = self._search_regex(
r'post_var\[["\']file_id["\']\s*\]\s*=\s*(.+?);',
webpage, 'file ID')
curr_url = self._search_regex(
r'post_var\[["\']curr_url["\']\s*\]\s*=\s*"(.+?)";',
webpage, 'curr URL')
data = {
'action': 'get_info',
'curr_url': curr_url,
'file_id': file_id,
'video_url': file_id,
}
response = self._download_json(
self._APPS_BASE_URL + '/media/play/handler.php', video_id,
data=urlencode_postdata(data),
headers=merge_dicts({
'Content-Type': 'application/x-www-form-urlencoded'},
self.geo_verification_headers()))
result = response['result']
if not response.get('success') or not response.get('access'):
error = clean_html(response.get('access_err_msg'))
if 'Video streaming is not available in your country' in error:
self.raise_geo_restricted(
msg=error, countries=self._GEO_COUNTRIES)
else:
raise ExtractorError(error, expected=True)
formats = []
width = int_or_none(result.get('width'))
height = int_or_none(result.get('height'))
playlist0 = result['playlist'][0]
for fmt in playlist0['sources']:
file_url = urljoin(self._APPS_BASE_URL, fmt.get('file'))
if not file_url:
continue
# If we ever wanted to provide the final resolved URL that
# does not require cookies, albeit with a shorter lifespan:
# urlh = self._downloader.urlopen(file_url)
# resolved_url = urlh.geturl()
label = fmt.get('label')
h = self._FORMAT_HEIGHTS.get(label)
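            # Only the height per label is known; derive the width from the
            # source aspect ratio when both source dimensions are available.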
w = h * width // height if h and width and height else None
formats.append({
'format_id': label,
'ext': fmt.get('type'),
'url': file_url,
'width': w,
'height': h,
})
self._sort_formats(formats)
subtitles = {}
tracks = try_get(playlist0, lambda x: x['tracks'], list) or []
for track in tracks:
if not isinstance(track, dict):
continue
track_kind = str_or_none(track.get('kind'))
if not track_kind or not isinstance(track_kind, compat_str):
continue
if track_kind.lower() not in ('captions', 'subtitles'):
continue
track_url = urljoin(self._APPS_BASE_URL, track.get('file'))
if not track_url:
continue
track_label = track.get('label')
subtitles.setdefault(self._CC_LANGS.get(
track_label, track_label), []).append({
'url': self._proto_relative_url(track_url),
'ext': 'srt',
})
# Likes
emotion = self._download_json(
'https://emocounter.hkedcity.net/handler.php', video_id,
data=urlencode_postdata({
'action': 'get_emotion',
'data[bucket_id]': 'etv',
'data[identifier]': video_id,
}),
headers={'Content-Type': 'application/x-www-form-urlencoded'},
fatal=False) or {}
like_count = int_or_none(try_get(
emotion, lambda x: x['data']['emotion_data'][0]['count']))
return {
'id': video_id,
'title': title,
'description': self._html_search_meta(
'description', webpage, fatal=False),
'upload_date': unified_strdate(self._html_search_meta(
'ed_date', webpage, fatal=False), day_first=False),
'duration': int_or_none(result.get('length')),
'formats': formats,
'subtitles': subtitles,
'thumbnail': urljoin(self._APPS_BASE_URL, result.get('image')),
'view_count': parse_count(result.get('view_count')),
'like_count': like_count,
}
|
unlicense
|
f7f4bf5cc02aa59fb0dbdf219c075434
| 34.900524
| 86
| 0.506052
| 3.414841
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/nintendo.py
|
13
|
1882
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .ooyala import OoyalaIE
class NintendoIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?nintendo\.com/(?:games/detail|nintendo-direct)/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://www.nintendo.com/games/detail/duck-hunt-wii-u/',
'info_dict': {
'id': 'MzMmticjp0VPzO3CCj4rmFOuohEuEWoW',
'ext': 'flv',
'title': 'Duck Hunt Wii U VC NES - Trailer',
'duration': 60.326,
},
'params': {
'skip_download': True,
},
'add_ie': ['Ooyala'],
}, {
'url': 'http://www.nintendo.com/games/detail/tokyo-mirage-sessions-fe-wii-u',
'info_dict': {
'id': 'tokyo-mirage-sessions-fe-wii-u',
'title': 'Tokyo Mirage Sessions ♯FE',
},
'playlist_count': 4,
}, {
'url': 'https://www.nintendo.com/nintendo-direct/09-04-2019/',
'info_dict': {
'id': 'J2bXdmaTE6fe3dWJTPcc7m23FNbc_A1V',
'ext': 'mp4',
'title': 'Switch_ROS_ND0904-H264.mov',
'duration': 2324.758,
},
'params': {
'skip_download': True,
},
'add_ie': ['Ooyala'],
}]
def _real_extract(self, url):
page_id = self._match_id(url)
webpage = self._download_webpage(url, page_id)
entries = [
OoyalaIE._build_url_result(m.group('code'))
for m in re.finditer(
r'data-(?:video-id|directVideoId)=(["\'])(?P<code>(?:(?!\1).)+)\1', webpage)]
title = self._html_search_regex(
r'(?s)<(?:span|div)[^>]+class="(?:title|wrapper)"[^>]*>.*?<h1>(.+?)</h1>',
webpage, 'title', fatal=False)
return self.playlist_result(
entries, page_id, title)
|
unlicense
|
8b0717f78700948a54d3384a9d63b707
| 30.333333
| 102
| 0.50266
| 3.061889
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
test/test_iqiyi_sdk_interpreter.py
|
36
|
1104
|
#!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import FakeYDL
from youtube_dl.extractor import IqiyiIE
class IqiyiIEWithCredentials(IqiyiIE):
def _get_login_info(self):
return 'foo', 'bar'
class WarningLogger(object):
def __init__(self):
self.messages = []
def warning(self, msg):
self.messages.append(msg)
def debug(self, msg):
pass
def error(self, msg):
pass
class TestIqiyiSDKInterpreter(unittest.TestCase):
def test_iqiyi_sdk_interpreter(self):
'''
        Test the functionality of IqiyiSDKInterpreter by trying to log in.
        If `sign` is incorrect, the /validate call throws an HTTP 556 error.
'''
logger = WarningLogger()
ie = IqiyiIEWithCredentials(FakeYDL({'logger': logger}))
ie._login()
self.assertTrue('unable to log in:' in logger.messages[0])
if __name__ == '__main__':
unittest.main()
|
unlicense
|
d7fe3331e893f9b7e2062bc6716d1bfd
| 22
| 79
| 0.650362
| 3.643564
| false
| true
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/reddit.py
|
5
|
5331
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
float_or_none,
try_get,
unescapeHTML,
url_or_none,
)
class RedditIE(InfoExtractor):
_VALID_URL = r'https?://v\.redd\.it/(?P<id>[^/?#&]+)'
_TEST = {
# from https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/
'url': 'https://v.redd.it/zv89llsvexdz',
'md5': '0a070c53eba7ec4534d95a5a1259e253',
'info_dict': {
'id': 'zv89llsvexdz',
'ext': 'mp4',
'title': 'zv89llsvexdz',
},
'params': {
'format': 'bestvideo',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
formats = self._extract_m3u8_formats(
'https://v.redd.it/%s/HLSPlaylist.m3u8' % video_id, video_id,
'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)
formats.extend(self._extract_mpd_formats(
'https://v.redd.it/%s/DASHPlaylist.mpd' % video_id, video_id,
mpd_id='dash', fatal=False))
self._sort_formats(formats)
return {
'id': video_id,
'title': video_id,
'formats': formats,
}
class RedditRIE(InfoExtractor):
_VALID_URL = r'(?P<url>https?://(?:[^/]+\.)?reddit\.com/r/[^/]+/comments/(?P<id>[^/?#&]+))'
_TESTS = [{
'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
'info_dict': {
'id': 'zv89llsvexdz',
'ext': 'mp4',
'title': 'That small heart attack.',
'thumbnail': r're:^https?://.*\.(?:jpg|png)',
'thumbnails': 'count:4',
'timestamp': 1501941939,
'upload_date': '20170805',
'uploader': 'Antw87',
'duration': 12,
'like_count': int,
'dislike_count': int,
'comment_count': int,
'age_limit': 0,
},
'params': {
'format': 'bestvideo',
'skip_download': True,
},
}, {
'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',
'only_matching': True,
}, {
# imgur
'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
'only_matching': True,
}, {
# imgur @ old reddit
'url': 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
'only_matching': True,
}, {
# streamable
'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/',
'only_matching': True,
}, {
# youtube
'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/',
'only_matching': True,
}, {
# reddit video @ nm reddit
'url': 'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/',
'only_matching': True,
}]
def _real_extract(self, url):
        url, video_id = re.match(self._VALID_URL, url).group('url', 'id')
data = self._download_json(
url + '/.json', video_id)[0]['data']['children'][0]['data']
video_url = data['url']
# Avoid recursing into the same reddit URL
if 'reddit.com/' in video_url and '/%s/' % video_id in video_url:
raise ExtractorError('No media found', expected=True)
over_18 = data.get('over_18')
if over_18 is True:
age_limit = 18
elif over_18 is False:
age_limit = 0
else:
age_limit = None
thumbnails = []
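        # Collect the preview source image and all of its listed resolutions
        # as thumbnail candidates.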
def add_thumbnail(src):
if not isinstance(src, dict):
return
thumbnail_url = url_or_none(src.get('url'))
if not thumbnail_url:
return
thumbnails.append({
'url': unescapeHTML(thumbnail_url),
'width': int_or_none(src.get('width')),
'height': int_or_none(src.get('height')),
})
for image in try_get(data, lambda x: x['preview']['images']) or []:
if not isinstance(image, dict):
continue
add_thumbnail(image.get('source'))
resolutions = image.get('resolutions')
if isinstance(resolutions, list):
for resolution in resolutions:
add_thumbnail(resolution)
return {
'_type': 'url_transparent',
'url': video_url,
'title': data.get('title'),
'thumbnails': thumbnails,
'timestamp': float_or_none(data.get('created_utc')),
'uploader': data.get('author'),
'duration': int_or_none(try_get(
data,
(lambda x: x['media']['reddit_video']['duration'],
lambda x: x['secure_media']['reddit_video']['duration']))),
'like_count': int_or_none(data.get('ups')),
'dislike_count': int_or_none(data.get('downs')),
'comment_count': int_or_none(data.get('num_comments')),
'age_limit': age_limit,
}
|
unlicense
|
3603bdd95c92477128dce841912f1ebd
| 32.111801
| 114
| 0.512849
| 3.421694
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/ninegag.py
|
1
|
4097
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
try_get,
url_or_none,
)
class NineGagIE(InfoExtractor):
IE_NAME = '9gag'
_VALID_URL = r'https?://(?:www\.)?9gag\.com/gag/(?P<id>[^/?&#]+)'
_TEST = {
'url': 'https://9gag.com/gag/ae5Ag7B',
'info_dict': {
'id': 'ae5Ag7B',
'ext': 'mp4',
'title': 'Capybara Agility Training',
'upload_date': '20191108',
'timestamp': 1573237208,
'categories': ['Awesome'],
'tags': ['Weimaraner', 'American Pit Bull Terrier'],
'duration': 44,
'like_count': int,
'dislike_count': int,
'comment_count': int,
}
}
def _real_extract(self, url):
post_id = self._match_id(url)
post = self._download_json(
'https://9gag.com/v1/post', post_id, query={
'id': post_id
})['data']['post']
if post.get('type') != 'Animated':
raise ExtractorError(
'The given url does not contain a video',
expected=True)
title = post['title']
duration = None
formats = []
thumbnails = []
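        # The 'images' dict mixes still renditions (kept as thumbnails) with
        # animated ones (webm/mp4, kept as formats with per-codec variants).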
for key, image in (post.get('images') or {}).items():
image_url = url_or_none(image.get('url'))
if not image_url:
continue
ext = determine_ext(image_url)
image_id = key.strip('image')
common = {
'url': image_url,
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
}
if ext in ('jpg', 'png'):
webp_url = image.get('webpUrl')
if webp_url:
t = common.copy()
t.update({
'id': image_id + '-webp',
'url': webp_url,
})
thumbnails.append(t)
common.update({
'id': image_id,
'ext': ext,
})
thumbnails.append(common)
elif ext in ('webm', 'mp4'):
if not duration:
duration = int_or_none(image.get('duration'))
common['acodec'] = 'none' if image.get('hasAudio') == 0 else None
for vcodec in ('vp8', 'vp9', 'h265'):
c_url = image.get(vcodec + 'Url')
if not c_url:
continue
c_f = common.copy()
c_f.update({
'format_id': image_id + '-' + vcodec,
'url': c_url,
'vcodec': vcodec,
})
formats.append(c_f)
common.update({
'ext': ext,
'format_id': image_id,
})
formats.append(common)
self._sort_formats(formats)
section = try_get(post, lambda x: x['postSection']['name'])
tags = None
post_tags = post.get('tags')
if post_tags:
tags = []
for tag in post_tags:
tag_key = tag.get('key')
if not tag_key:
continue
tags.append(tag_key)
get_count = lambda x: int_or_none(post.get(x + 'Count'))
return {
'id': post_id,
'title': title,
'timestamp': int_or_none(post.get('creationTs')),
'duration': duration,
'formats': formats,
'thumbnails': thumbnails,
'like_count': get_count('upVote'),
'dislike_count': get_count('downVote'),
'comment_count': get_count('comments'),
'age_limit': 18 if post.get('nsfw') == 1 else None,
'categories': [section] if section else None,
'tags': tags,
}
|
unlicense
|
d1c354f601f269dd6ac91229468f45f9
| 31.776
| 81
| 0.428118
| 4.03248
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/zattoo.py
|
15
|
14325
|
# coding: utf-8
from __future__ import unicode_literals
import re
from uuid import uuid4
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
)
from ..utils import (
ExtractorError,
int_or_none,
try_get,
url_or_none,
urlencode_postdata,
)
class ZattooPlatformBaseIE(InfoExtractor):
_power_guide_hash = None
def _host_url(self):
return 'https://%s' % (self._API_HOST if hasattr(self, '_API_HOST') else self._HOST)
def _login(self):
username, password = self._get_login_info()
if not username or not password:
self.raise_login_required(
'A valid %s account is needed to access this media.'
% self._NETRC_MACHINE)
try:
data = self._download_json(
'%s/zapi/v2/account/login' % self._host_url(), None, 'Logging in',
data=urlencode_postdata({
'login': username,
'password': password,
'remember': 'true',
}), headers={
'Referer': '%s/login' % self._host_url(),
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
})
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400:
raise ExtractorError(
'Unable to login: incorrect username and/or password',
expected=True)
raise
self._power_guide_hash = data['session']['power_guide_hash']
def _real_initialize(self):
webpage = self._download_webpage(
self._host_url(), None, 'Downloading app token')
app_token = self._html_search_regex(
r'appToken\s*=\s*(["\'])(?P<token>(?:(?!\1).)+?)\1',
webpage, 'app token', group='token')
app_version = self._html_search_regex(
r'<!--\w+-(.+?)-', webpage, 'app version', default='2.8.2')
# Will setup appropriate cookies
self._request_webpage(
'%s/zapi/v2/session/hello' % self._host_url(), None,
'Opening session', data=urlencode_postdata({
'client_app_token': app_token,
'uuid': compat_str(uuid4()),
'lang': 'en',
'app_version': app_version,
'format': 'json',
}))
self._login()
def _extract_cid(self, video_id, channel_name):
channel_groups = self._download_json(
'%s/zapi/v2/cached/channels/%s' % (self._host_url(),
self._power_guide_hash),
video_id, 'Downloading channel list',
query={'details': False})['channel_groups']
channel_list = []
for chgrp in channel_groups:
channel_list.extend(chgrp['channels'])
try:
return next(
chan['cid'] for chan in channel_list
if chan.get('cid') and (
chan.get('display_alias') == channel_name
or chan.get('cid') == channel_name))
except StopIteration:
raise ExtractorError('Could not extract channel id')
def _extract_cid_and_video_info(self, video_id):
data = self._download_json(
'%s/zapi/v2/cached/program/power_details/%s' % (
self._host_url(), self._power_guide_hash),
video_id,
'Downloading video information',
query={
'program_ids': video_id,
'complete': True,
})
p = data['programs'][0]
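        # power_details uses terse keys: t/et = title/episode title, d =
        # description, i_url = image, c = categories, g = tags.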
cid = p['cid']
info_dict = {
'id': video_id,
'title': p.get('t') or p['et'],
'description': p.get('d'),
'thumbnail': p.get('i_url'),
'creator': p.get('channel_name'),
'episode': p.get('et'),
'episode_number': int_or_none(p.get('e_no')),
'season_number': int_or_none(p.get('s_no')),
'release_year': int_or_none(p.get('year')),
'categories': try_get(p, lambda x: x['c'], list),
'tags': try_get(p, lambda x: x['g'], list)
}
return cid, info_dict
def _extract_formats(self, cid, video_id, record_id=None, is_live=False):
postdata_common = {
'https_watch_urls': True,
}
if is_live:
postdata_common.update({'timeshift': 10800})
url = '%s/zapi/watch/live/%s' % (self._host_url(), cid)
elif record_id:
url = '%s/zapi/watch/recording/%s' % (self._host_url(), record_id)
else:
url = '%s/zapi/watch/recall/%s/%s' % (self._host_url(), cid, video_id)
formats = []
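        # Each stream type has to be requested separately; every response
        # lists watch URLs pointing at the actual manifests.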
for stream_type in ('dash', 'hls', 'hls5', 'hds'):
postdata = postdata_common.copy()
postdata['stream_type'] = stream_type
data = self._download_json(
url, video_id, 'Downloading %s formats' % stream_type.upper(),
data=urlencode_postdata(postdata), fatal=False)
if not data:
continue
watch_urls = try_get(
data, lambda x: x['stream']['watch_urls'], list)
if not watch_urls:
continue
for watch in watch_urls:
if not isinstance(watch, dict):
continue
watch_url = url_or_none(watch.get('url'))
if not watch_url:
continue
format_id_list = [stream_type]
maxrate = watch.get('maxrate')
if maxrate:
format_id_list.append(compat_str(maxrate))
audio_channel = watch.get('audio_channel')
if audio_channel:
format_id_list.append(compat_str(audio_channel))
preference = 1 if audio_channel == 'A' else None
format_id = '-'.join(format_id_list)
if stream_type in ('dash', 'dash_widevine', 'dash_playready'):
this_formats = self._extract_mpd_formats(
watch_url, video_id, mpd_id=format_id, fatal=False)
elif stream_type in ('hls', 'hls5', 'hls5_fairplay'):
this_formats = self._extract_m3u8_formats(
watch_url, video_id, 'mp4',
entry_protocol='m3u8_native', m3u8_id=format_id,
fatal=False)
elif stream_type == 'hds':
this_formats = self._extract_f4m_formats(
watch_url, video_id, f4m_id=format_id, fatal=False)
elif stream_type == 'smooth_playready':
this_formats = self._extract_ism_formats(
watch_url, video_id, ism_id=format_id, fatal=False)
else:
assert False
for this_format in this_formats:
this_format['preference'] = preference
formats.extend(this_formats)
self._sort_formats(formats)
return formats
def _extract_video(self, channel_name, video_id, record_id=None, is_live=False):
if is_live:
cid = self._extract_cid(video_id, channel_name)
info_dict = {
'id': channel_name,
'title': self._live_title(channel_name),
'is_live': True,
}
else:
cid, info_dict = self._extract_cid_and_video_info(video_id)
formats = self._extract_formats(
cid, video_id, record_id=record_id, is_live=is_live)
info_dict['formats'] = formats
return info_dict
class QuicklineBaseIE(ZattooPlatformBaseIE):
_NETRC_MACHINE = 'quickline'
_HOST = 'mobiltv.quickline.com'
class QuicklineIE(QuicklineBaseIE):
_VALID_URL = r'https?://(?:www\.)?%s/watch/(?P<channel>[^/]+)/(?P<id>[0-9]+)' % re.escape(QuicklineBaseIE._HOST)
_TEST = {
'url': 'https://mobiltv.quickline.com/watch/prosieben/130671867-maze-runner-die-auserwaehlten-in-der-brandwueste',
'only_matching': True,
}
def _real_extract(self, url):
channel_name, video_id = re.match(self._VALID_URL, url).groups()
return self._extract_video(channel_name, video_id)
class QuicklineLiveIE(QuicklineBaseIE):
_VALID_URL = r'https?://(?:www\.)?%s/watch/(?P<id>[^/]+)' % re.escape(QuicklineBaseIE._HOST)
_TEST = {
'url': 'https://mobiltv.quickline.com/watch/srf1',
'only_matching': True,
}
@classmethod
def suitable(cls, url):
return False if QuicklineIE.suitable(url) else super(QuicklineLiveIE, cls).suitable(url)
def _real_extract(self, url):
channel_name = video_id = self._match_id(url)
return self._extract_video(channel_name, video_id, is_live=True)
class ZattooBaseIE(ZattooPlatformBaseIE):
_NETRC_MACHINE = 'zattoo'
_HOST = 'zattoo.com'
def _make_valid_url(tmpl, host):
return tmpl % re.escape(host)
class ZattooIE(ZattooBaseIE):
_VALID_URL_TEMPLATE = r'https?://(?:www\.)?%s/watch/(?P<channel>[^/]+?)/(?P<id>[0-9]+)[^/]+(?:/(?P<recid>[0-9]+))?'
_VALID_URL = _make_valid_url(_VALID_URL_TEMPLATE, ZattooBaseIE._HOST)
# Since regular videos are only available for 7 days and recorded videos
# are only available for a specific user, we cannot have detailed tests.
_TESTS = [{
'url': 'https://zattoo.com/watch/prosieben/130671867-maze-runner-die-auserwaehlten-in-der-brandwueste',
'only_matching': True,
}, {
'url': 'https://zattoo.com/watch/srf_zwei/132905652-eishockey-spengler-cup/102791477/1512211800000/1514433500000/92000',
'only_matching': True,
}]
def _real_extract(self, url):
channel_name, video_id, record_id = re.match(self._VALID_URL, url).groups()
return self._extract_video(channel_name, video_id, record_id)
class ZattooLiveIE(ZattooBaseIE):
_VALID_URL = r'https?://(?:www\.)?zattoo\.com/watch/(?P<id>[^/]+)'
_TEST = {
'url': 'https://zattoo.com/watch/srf1',
'only_matching': True,
}
@classmethod
def suitable(cls, url):
return False if ZattooIE.suitable(url) else super(ZattooLiveIE, cls).suitable(url)
def _real_extract(self, url):
channel_name = video_id = self._match_id(url)
return self._extract_video(channel_name, video_id, is_live=True)
class NetPlusIE(ZattooIE):
_NETRC_MACHINE = 'netplus'
_HOST = 'netplus.tv'
_API_HOST = 'www.%s' % _HOST
_VALID_URL = _make_valid_url(ZattooIE._VALID_URL_TEMPLATE, _HOST)
_TESTS = [{
'url': 'https://www.netplus.tv/watch/abc/123-abc',
'only_matching': True,
}]
class MNetTVIE(ZattooIE):
_NETRC_MACHINE = 'mnettv'
_HOST = 'tvplus.m-net.de'
_VALID_URL = _make_valid_url(ZattooIE._VALID_URL_TEMPLATE, _HOST)
_TESTS = [{
'url': 'https://tvplus.m-net.de/watch/abc/123-abc',
'only_matching': True,
}]
class WalyTVIE(ZattooIE):
_NETRC_MACHINE = 'walytv'
_HOST = 'player.waly.tv'
_VALID_URL = _make_valid_url(ZattooIE._VALID_URL_TEMPLATE, _HOST)
_TESTS = [{
'url': 'https://player.waly.tv/watch/abc/123-abc',
'only_matching': True,
}]
class BBVTVIE(ZattooIE):
_NETRC_MACHINE = 'bbvtv'
_HOST = 'bbv-tv.net'
_API_HOST = 'www.%s' % _HOST
_VALID_URL = _make_valid_url(ZattooIE._VALID_URL_TEMPLATE, _HOST)
_TESTS = [{
'url': 'https://www.bbv-tv.net/watch/abc/123-abc',
'only_matching': True,
}]
class VTXTVIE(ZattooIE):
_NETRC_MACHINE = 'vtxtv'
_HOST = 'vtxtv.ch'
_API_HOST = 'www.%s' % _HOST
_VALID_URL = _make_valid_url(ZattooIE._VALID_URL_TEMPLATE, _HOST)
_TESTS = [{
'url': 'https://www.vtxtv.ch/watch/abc/123-abc',
'only_matching': True,
}]
class MyVisionTVIE(ZattooIE):
_NETRC_MACHINE = 'myvisiontv'
_HOST = 'myvisiontv.ch'
_API_HOST = 'www.%s' % _HOST
_VALID_URL = _make_valid_url(ZattooIE._VALID_URL_TEMPLATE, _HOST)
_TESTS = [{
'url': 'https://www.myvisiontv.ch/watch/abc/123-abc',
'only_matching': True,
}]
class GlattvisionTVIE(ZattooIE):
_NETRC_MACHINE = 'glattvisiontv'
_HOST = 'iptv.glattvision.ch'
_VALID_URL = _make_valid_url(ZattooIE._VALID_URL_TEMPLATE, _HOST)
_TESTS = [{
'url': 'https://iptv.glattvision.ch/watch/abc/123-abc',
'only_matching': True,
}]
class SAKTVIE(ZattooIE):
_NETRC_MACHINE = 'saktv'
_HOST = 'saktv.ch'
_API_HOST = 'www.%s' % _HOST
_VALID_URL = _make_valid_url(ZattooIE._VALID_URL_TEMPLATE, _HOST)
_TESTS = [{
'url': 'https://www.saktv.ch/watch/abc/123-abc',
'only_matching': True,
}]
class EWETVIE(ZattooIE):
_NETRC_MACHINE = 'ewetv'
_HOST = 'tvonline.ewe.de'
_VALID_URL = _make_valid_url(ZattooIE._VALID_URL_TEMPLATE, _HOST)
_TESTS = [{
'url': 'https://tvonline.ewe.de/watch/abc/123-abc',
'only_matching': True,
}]
class QuantumTVIE(ZattooIE):
_NETRC_MACHINE = 'quantumtv'
_HOST = 'quantum-tv.com'
_API_HOST = 'www.%s' % _HOST
_VALID_URL = _make_valid_url(ZattooIE._VALID_URL_TEMPLATE, _HOST)
_TESTS = [{
'url': 'https://www.quantum-tv.com/watch/abc/123-abc',
'only_matching': True,
}]
class OsnatelTVIE(ZattooIE):
_NETRC_MACHINE = 'osnateltv'
_HOST = 'tvonline.osnatel.de'
_VALID_URL = _make_valid_url(ZattooIE._VALID_URL_TEMPLATE, _HOST)
_TESTS = [{
'url': 'https://tvonline.osnatel.de/watch/abc/123-abc',
'only_matching': True,
}]
class EinsUndEinsTVIE(ZattooIE):
_NETRC_MACHINE = '1und1tv'
_HOST = '1und1.tv'
_API_HOST = 'www.%s' % _HOST
_VALID_URL = _make_valid_url(ZattooIE._VALID_URL_TEMPLATE, _HOST)
_TESTS = [{
'url': 'https://www.1und1.tv/watch/abc/123-abc',
'only_matching': True,
}]
class SaltTVIE(ZattooIE):
_NETRC_MACHINE = 'salttv'
_HOST = 'tv.salt.ch'
_VALID_URL = _make_valid_url(ZattooIE._VALID_URL_TEMPLATE, _HOST)
_TESTS = [{
'url': 'https://tv.salt.ch/watch/abc/123-abc',
'only_matching': True,
}]
|
unlicense
|
c64beb66459a273db2c6d3593733c26b
| 32.083141
| 128
| 0.547225
| 3.320584
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/radiocanada.py
|
19
|
6349
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
unified_strdate,
)
class RadioCanadaIE(InfoExtractor):
IE_NAME = 'radiocanada'
_VALID_URL = r'(?:radiocanada:|https?://ici\.radio-canada\.ca/widgets/mediaconsole/)(?P<app_code>[^:/]+)[:/](?P<id>[0-9]+)'
_TESTS = [
{
'url': 'http://ici.radio-canada.ca/widgets/mediaconsole/medianet/7184272',
'info_dict': {
'id': '7184272',
'ext': 'mp4',
'title': 'Le parcours du tireur capté sur vidéo',
'description': 'Images des caméras de surveillance fournies par la GRC montrant le parcours du tireur d\'Ottawa',
'upload_date': '20141023',
},
'params': {
# m3u8 download
'skip_download': True,
}
},
{
# empty Title
'url': 'http://ici.radio-canada.ca/widgets/mediaconsole/medianet/7754998/',
'info_dict': {
'id': '7754998',
'ext': 'mp4',
'title': 'letelejournal22h',
'description': 'INTEGRALE WEB 22H-TJ',
'upload_date': '20170720',
},
'params': {
# m3u8 download
'skip_download': True,
},
},
{
# with protectionType but not actually DRM protected
'url': 'radiocanada:toutv:140872',
'info_dict': {
'id': '140872',
'title': 'Épisode 1',
'series': 'District 31',
},
'only_matching': True,
}
]
_GEO_COUNTRIES = ['CA']
_access_token = None
_claims = None
def _call_api(self, path, video_id=None, app_code=None, query=None):
if not query:
query = {}
query.update({
'client_key': '773aea60-0e80-41bb-9c7f-e6d7c3ad17fb',
'output': 'json',
})
if video_id:
query.update({
'appCode': app_code,
'idMedia': video_id,
})
if self._access_token:
query['access_token'] = self._access_token
try:
return self._download_json(
'https://services.radio-canada.ca/media/' + path, video_id, query=query)
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code in (401, 422):
data = self._parse_json(e.cause.read().decode(), None)
error = data.get('error_description') or data['errorMessage']['text']
raise ExtractorError(error, expected=True)
raise
def _extract_info(self, app_code, video_id):
metas = self._call_api('meta/v1/index.ashx', video_id, app_code)['Metas']
def get_meta(name):
for meta in metas:
if meta.get('name') == name:
text = meta.get('text')
if text:
return text
# protectionType does not necessarily mean the video is DRM protected (see
# https://github.com/ytdl-org/youtube-dl/pull/18609).
if get_meta('protectionType'):
self.report_warning('This video is probably DRM protected.')
query = {
'connectionType': 'hd',
'deviceType': 'ipad',
'multibitrate': 'true',
}
if self._claims:
query['claims'] = self._claims
v_data = self._call_api('validation/v2/', video_id, app_code, query)
v_url = v_data.get('url')
if not v_url:
error = v_data['message']
if error == "Le contenu sélectionné n'est pas disponible dans votre pays":
raise self.raise_geo_restricted(error, self._GEO_COUNTRIES)
if error == 'Le contenu sélectionné est disponible seulement en premium':
self.raise_login_required(error)
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, error), expected=True)
formats = self._extract_m3u8_formats(v_url, video_id, 'mp4')
self._sort_formats(formats)
subtitles = {}
closed_caption_url = get_meta('closedCaption') or get_meta('closedCaptionHTML5')
if closed_caption_url:
subtitles['fr'] = [{
'url': closed_caption_url,
'ext': determine_ext(closed_caption_url, 'vtt'),
}]
return {
'id': video_id,
'title': get_meta('Title') or get_meta('AV-nomEmission'),
'description': get_meta('Description') or get_meta('ShortDescription'),
'thumbnail': get_meta('imageHR') or get_meta('imageMR') or get_meta('imageBR'),
'duration': int_or_none(get_meta('length')),
'series': get_meta('Emission'),
            'season_number': int_or_none(get_meta('SrcSaison')),
            'episode_number': int_or_none(get_meta('SrcEpisode')),
'upload_date': unified_strdate(get_meta('Date')),
'subtitles': subtitles,
'formats': formats,
}
def _real_extract(self, url):
return self._extract_info(*re.match(self._VALID_URL, url).groups())
class RadioCanadaAudioVideoIE(InfoExtractor):
IE_NAME = 'radiocanada:audiovideo'
_VALID_URL = r'https?://ici\.radio-canada\.ca/([^/]+/)*media-(?P<id>[0-9]+)'
_TESTS = [{
'url': 'http://ici.radio-canada.ca/audio-video/media-7527184/barack-obama-au-vietnam',
'info_dict': {
'id': '7527184',
'ext': 'mp4',
'title': 'Barack Obama au Vietnam',
'description': 'Les États-Unis lèvent l\'embargo sur la vente d\'armes qui datait de la guerre du Vietnam',
'upload_date': '20160523',
},
'params': {
# m3u8 download
'skip_download': True,
},
}, {
'url': 'https://ici.radio-canada.ca/info/videos/media-7527184/barack-obama-au-vietnam',
'only_matching': True,
}]
def _real_extract(self, url):
return self.url_result('radiocanada:medianet:%s' % self._match_id(url))
|
unlicense
|
14893ad7d7786283e9756951062205dc
| 36.070175
| 129
| 0.522322
| 3.599659
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/izlesene.py
|
24
|
4152
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse_unquote,
)
from ..utils import (
determine_ext,
float_or_none,
get_element_by_id,
int_or_none,
parse_iso8601,
str_to_int,
)
class IzleseneIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://(?:(?:www|m)\.)?izlesene\.com/
(?:video|embedplayer)/(?:[^/]+/)?(?P<id>[0-9]+)
'''
_TESTS = [
{
'url': 'http://www.izlesene.com/video/sevincten-cildirtan-dogum-gunu-hediyesi/7599694',
'md5': '4384f9f0ea65086734b881085ee05ac2',
'info_dict': {
'id': '7599694',
'ext': 'mp4',
'title': 'Sevinçten Çıldırtan Doğum Günü Hediyesi',
'description': 'md5:253753e2655dde93f59f74b572454f6d',
'thumbnail': r're:^https?://.*\.jpg',
'uploader_id': 'pelikzzle',
'timestamp': int,
'upload_date': '20140702',
'duration': 95.395,
'age_limit': 0,
}
},
{
'url': 'http://www.izlesene.com/video/tarkan-dortmund-2006-konseri/17997',
'md5': '97f09b6872bffa284cb7fa4f6910cb72',
'info_dict': {
'id': '17997',
'ext': 'mp4',
'title': 'Tarkan Dortmund 2006 Konseri',
'thumbnail': r're:^https://.*\.jpg',
'uploader_id': 'parlayankiz',
'timestamp': int,
'upload_date': '20061112',
'duration': 253.666,
'age_limit': 0,
}
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage('http://www.izlesene.com/video/%s' % video_id, video_id)
video = self._parse_json(
self._search_regex(
r'videoObj\s*=\s*({.+?})\s*;\s*\n', webpage, 'streams'),
video_id)
title = video.get('videoTitle') or self._og_search_title(webpage)
formats = []
for stream in video['media']['level']:
source_url = stream.get('source')
if not source_url or not isinstance(source_url, compat_str):
continue
            ext = determine_ext(source_url, 'mp4')
quality = stream.get('value')
height = int_or_none(quality)
formats.append({
'format_id': '%sp' % quality if quality else 'sd',
'url': compat_urllib_parse_unquote(source_url),
'ext': ext,
'height': height,
})
self._sort_formats(formats)
description = self._og_search_description(webpage, default=None)
thumbnail = video.get('posterURL') or self._proto_relative_url(
self._og_search_thumbnail(webpage), scheme='http:')
uploader = self._html_search_regex(
r"adduserUsername\s*=\s*'([^']+)';",
webpage, 'uploader', fatal=False)
timestamp = parse_iso8601(self._html_search_meta(
'uploadDate', webpage, 'upload date'))
duration = float_or_none(video.get('duration') or self._html_search_regex(
r'videoduration["\']?\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1',
webpage, 'duration', fatal=False, group='value'), scale=1000)
view_count = str_to_int(get_element_by_id('videoViewCount', webpage))
comment_count = self._html_search_regex(
r'comment_count\s*=\s*\'([^\']+)\';',
webpage, 'comment_count', fatal=False)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'uploader_id': uploader,
'timestamp': timestamp,
'duration': duration,
'view_count': int_or_none(view_count),
'comment_count': int_or_none(comment_count),
'age_limit': self._family_friendly_search(webpage),
'formats': formats,
}
|
unlicense
|
c26a91e5a8b97314b45f4cb415f43bae
| 34.42735
| 99
| 0.506152
| 3.573276
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/xiami.py
|
7
|
6816
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
from ..utils import int_or_none
class XiamiBaseIE(InfoExtractor):
_API_BASE_URL = 'https://emumo.xiami.com/song/playlist/cat/json/id'
def _download_webpage_handle(self, *args, **kwargs):
        res = super(XiamiBaseIE, self)._download_webpage_handle(*args, **kwargs)
        if res is not False:
            webpage, _ = res
            if '>Xiami is currently not available in your country.<' in webpage:
                self.raise_geo_restricted('Xiami is currently not available in your country')
        return res
def _extract_track(self, track, track_id=None):
track_name = track.get('songName') or track.get('name') or track['subName']
artist = track.get('artist') or track.get('artist_name') or track.get('singers')
title = '%s - %s' % (artist, track_name) if artist else track_name
track_url = self._decrypt(track['location'])
subtitles = {}
lyrics_url = track.get('lyric_url') or track.get('lyric')
if lyrics_url and lyrics_url.startswith('http'):
subtitles['origin'] = [{'url': lyrics_url}]
return {
'id': track.get('song_id') or track_id,
'url': track_url,
'title': title,
'thumbnail': track.get('pic') or track.get('album_pic'),
'duration': int_or_none(track.get('length')),
'creator': track.get('artist', '').split(';')[0],
'track': track_name,
'track_number': int_or_none(track.get('track')),
'album': track.get('album_name') or track.get('title'),
'artist': artist,
'subtitles': subtitles,
}
def _extract_tracks(self, item_id, referer, typ=None):
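        # The playlist API resolves an id into its track list; 'typ' selects
        # the object type the id refers to (album, artist or collection).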
playlist = self._download_json(
'%s/%s%s' % (self._API_BASE_URL, item_id, '/type/%s' % typ if typ else ''),
item_id, headers={
'Referer': referer,
})
return [
self._extract_track(track, item_id)
for track in playlist['data']['trackList']]
@staticmethod
def _decrypt(origin):
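        # The first character gives the number of columns n; the rest is the
        # payload written column by column.  Reading it back row by row,
        # URL-unquoting and mapping '^' back to '0' yields the stream URL.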
n = int(origin[0])
origin = origin[1:]
short_length = len(origin) // n
long_num = len(origin) - short_length * n
l = tuple()
for i in range(0, n):
length = short_length
if i < long_num:
length += 1
l += (origin[0:length], )
origin = origin[length:]
ans = ''
for i in range(0, short_length + 1):
for j in range(0, n):
if len(l[j]) > i:
ans += l[j][i]
return compat_urllib_parse_unquote(ans).replace('^', '0')
class XiamiSongIE(XiamiBaseIE):
IE_NAME = 'xiami:song'
IE_DESC = '虾米音乐'
_VALID_URL = r'https?://(?:www\.)?xiami\.com/song/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'http://www.xiami.com/song/1775610518',
'md5': '521dd6bea40fd5c9c69f913c232cb57e',
'info_dict': {
'id': '1775610518',
'ext': 'mp3',
'title': 'HONNE - Woman',
'thumbnail': r're:http://img\.xiami\.net/images/album/.*\.jpg',
'duration': 265,
'creator': 'HONNE',
'track': 'Woman',
'album': 'Woman',
'artist': 'HONNE',
'subtitles': {
'origin': [{
'ext': 'lrc',
}],
},
},
'skip': 'Georestricted',
}, {
'url': 'http://www.xiami.com/song/1775256504',
'md5': '932a3abd45c6aa2b1fdbe028fcb4c4fc',
'info_dict': {
'id': '1775256504',
'ext': 'mp3',
'title': '戴荃 - 悟空',
'thumbnail': r're:http://img\.xiami\.net/images/album/.*\.jpg',
'duration': 200,
'creator': '戴荃',
'track': '悟空',
'album': '悟空',
'artist': '戴荃',
'subtitles': {
'origin': [{
'ext': 'lrc',
}],
},
},
'skip': 'Georestricted',
}, {
'url': 'http://www.xiami.com/song/1775953850',
'info_dict': {
'id': '1775953850',
'ext': 'mp3',
'title': 'До Скону - Чума Пожирает Землю',
'thumbnail': r're:http://img\.xiami\.net/images/album/.*\.jpg',
'duration': 683,
'creator': 'До Скону',
'track': 'Чума Пожирает Землю',
'track_number': 7,
'album': 'Ад',
'artist': 'До Скону',
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.xiami.com/song/xLHGwgd07a1',
'only_matching': True,
}]
def _real_extract(self, url):
return self._extract_tracks(self._match_id(url), url)[0]
class XiamiPlaylistBaseIE(XiamiBaseIE):
def _real_extract(self, url):
item_id = self._match_id(url)
return self.playlist_result(self._extract_tracks(item_id, url, self._TYPE), item_id)
class XiamiAlbumIE(XiamiPlaylistBaseIE):
IE_NAME = 'xiami:album'
IE_DESC = '虾米音乐 - 专辑'
_VALID_URL = r'https?://(?:www\.)?xiami\.com/album/(?P<id>[^/?#&]+)'
_TYPE = '1'
_TESTS = [{
'url': 'http://www.xiami.com/album/2100300444',
'info_dict': {
'id': '2100300444',
},
'playlist_count': 10,
'skip': 'Georestricted',
}, {
'url': 'http://www.xiami.com/album/512288?spm=a1z1s.6843761.1110925389.6.hhE9p9',
'only_matching': True,
}, {
'url': 'http://www.xiami.com/album/URVDji2a506',
'only_matching': True,
}]
class XiamiArtistIE(XiamiPlaylistBaseIE):
IE_NAME = 'xiami:artist'
IE_DESC = '虾米音乐 - 歌手'
_VALID_URL = r'https?://(?:www\.)?xiami\.com/artist/(?P<id>[^/?#&]+)'
_TYPE = '2'
_TESTS = [{
'url': 'http://www.xiami.com/artist/2132?spm=0.0.0.0.dKaScp',
'info_dict': {
'id': '2132',
},
'playlist_count': 20,
'skip': 'Georestricted',
}, {
'url': 'http://www.xiami.com/artist/bC5Tk2K6eb99',
'only_matching': True,
}]
class XiamiCollectionIE(XiamiPlaylistBaseIE):
IE_NAME = 'xiami:collection'
IE_DESC = '虾米音乐 - 精选集'
_VALID_URL = r'https?://(?:www\.)?xiami\.com/collect/(?P<id>[^/?#&]+)'
_TYPE = '3'
_TEST = {
'url': 'http://www.xiami.com/collect/156527391?spm=a1z1s.2943601.6856193.12.4jpBnr',
'info_dict': {
'id': '156527391',
},
'playlist_mincount': 29,
'skip': 'Georestricted',
}
|
unlicense
|
bcebfd6690b07e93c4310aea9f125829
| 32.278607
| 92
| 0.500075
| 3.111163
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/amp.py
|
5
|
4088
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
mimetype2ext,
parse_iso8601,
unified_timestamp,
url_or_none,
)
class AMPIE(InfoExtractor):
# parse Akamai Adaptive Media Player feed
def _extract_feed_info(self, url):
feed = self._download_json(
url, None, 'Downloading Akamai AMP feed',
'Unable to download Akamai AMP feed')
item = feed.get('channel', {}).get('item')
if not item:
raise ExtractorError('%s said: %s' % (self.IE_NAME, feed['error']))
video_id = item['guid']
def get_media_node(name, default=None):
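            # media-* fields may sit inside 'media-group' or directly on the
            # item; fall back to the bare field name last.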
media_name = 'media-%s' % name
media_group = item.get('media-group') or item
return media_group.get(media_name) or item.get(media_name) or item.get(name, default)
thumbnails = []
media_thumbnail = get_media_node('thumbnail')
if media_thumbnail:
if isinstance(media_thumbnail, dict):
media_thumbnail = [media_thumbnail]
for thumbnail_data in media_thumbnail:
thumbnail = thumbnail_data.get('@attributes', {})
thumbnail_url = url_or_none(thumbnail.get('url'))
if not thumbnail_url:
continue
thumbnails.append({
'url': self._proto_relative_url(thumbnail_url, 'http:'),
'width': int_or_none(thumbnail.get('width')),
'height': int_or_none(thumbnail.get('height')),
})
subtitles = {}
media_subtitle = get_media_node('subTitle')
if media_subtitle:
if isinstance(media_subtitle, dict):
media_subtitle = [media_subtitle]
for subtitle_data in media_subtitle:
subtitle = subtitle_data.get('@attributes', {})
subtitle_href = url_or_none(subtitle.get('href'))
if not subtitle_href:
continue
subtitles.setdefault(subtitle.get('lang') or 'en', []).append({
'url': subtitle_href,
'ext': mimetype2ext(subtitle.get('type')) or determine_ext(subtitle_href),
})
formats = []
media_content = get_media_node('content')
if isinstance(media_content, dict):
media_content = [media_content]
for media_data in media_content:
media = media_data.get('@attributes', {})
media_url = url_or_none(media.get('url'))
if not media_url:
continue
ext = mimetype2ext(media.get('type')) or determine_ext(media_url)
if ext == 'f4m':
formats.extend(self._extract_f4m_formats(
media_url + '?hdcore=3.4.0&plugin=aasp-3.4.0.132.124',
video_id, f4m_id='hds', fatal=False))
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
media_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
else:
formats.append({
'format_id': media_data.get('media-category', {}).get('@attributes', {}).get('label'),
'url': media_url,
'tbr': int_or_none(media.get('bitrate')),
'filesize': int_or_none(media.get('fileSize')),
'ext': ext,
})
self._sort_formats(formats)
timestamp = unified_timestamp(item.get('pubDate'), ' ') or parse_iso8601(item.get('dc-date'))
return {
'id': video_id,
'title': get_media_node('title'),
'description': get_media_node('description'),
'thumbnails': thumbnails,
'timestamp': timestamp,
'duration': int_or_none(media_content[0].get('@attributes', {}).get('duration')),
'subtitles': subtitles,
'formats': formats,
}
|
unlicense
|
1aa663ba5e85e5f335d0b83c12ced40a
| 38.68932
| 106
| 0.525196
| 4.108543
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/radiojavan.py
|
21
|
2761
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
parse_resolution,
str_to_int,
unified_strdate,
urlencode_postdata,
urljoin,
)
class RadioJavanIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?radiojavan\.com/videos/video/(?P<id>[^/]+)/?'
_TEST = {
'url': 'http://www.radiojavan.com/videos/video/chaartaar-ashoobam',
'md5': 'e85208ffa3ca8b83534fca9fe19af95b',
'info_dict': {
'id': 'chaartaar-ashoobam',
'ext': 'mp4',
'title': 'Chaartaar - Ashoobam',
'thumbnail': r're:^https?://.*\.jpe?g$',
'upload_date': '20150215',
'view_count': int,
'like_count': int,
'dislike_count': int,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
download_host = self._download_json(
'https://www.radiojavan.com/videos/video_host', video_id,
data=urlencode_postdata({'id': video_id}),
headers={
'Content-Type': 'application/x-www-form-urlencoded',
'Referer': url,
}).get('host', 'https://host1.rjmusicmedia.com')
webpage = self._download_webpage(url, video_id)
formats = []
for format_id, _, video_path in re.findall(
r'RJ\.video(?P<format_id>\d+[pPkK])\s*=\s*(["\'])(?P<url>(?:(?!\2).)+)\2',
webpage):
f = parse_resolution(format_id)
f.update({
'url': urljoin(download_host, video_path),
'format_id': format_id,
})
formats.append(f)
self._sort_formats(formats)
title = self._og_search_title(webpage)
thumbnail = self._og_search_thumbnail(webpage)
upload_date = unified_strdate(self._search_regex(
r'class="date_added">Date added: ([^<]+)<',
webpage, 'upload date', fatal=False))
view_count = str_to_int(self._search_regex(
r'class="views">Plays: ([\d,]+)',
webpage, 'view count', fatal=False))
like_count = str_to_int(self._search_regex(
r'class="rating">([\d,]+) likes',
webpage, 'like count', fatal=False))
dislike_count = str_to_int(self._search_regex(
r'class="rating">([\d,]+) dislikes',
webpage, 'dislike count', fatal=False))
return {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
'upload_date': upload_date,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'formats': formats,
}
|
unlicense
|
6f756cabc7981b981859de6b2a6e530d
| 32.26506
| 90
| 0.516842
| 3.553411
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
devscripts/make_lazy_extractors.py
|
7
|
2872
|
from __future__ import unicode_literals, print_function
from inspect import getsource
import io
import os
from os.path import dirname as dirn
import sys
print('WARNING: Lazy loading extractors is an experimental feature that may not always work', file=sys.stderr)
sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
lazy_extractors_filename = sys.argv[1]
if os.path.exists(lazy_extractors_filename):
os.remove(lazy_extractors_filename)
from youtube_dl.extractor import _ALL_CLASSES
from youtube_dl.extractor.common import InfoExtractor, SearchInfoExtractor
with open('devscripts/lazy_load_template.py', 'rt') as f:
module_template = f.read()
module_contents = [
module_template + '\n' + getsource(InfoExtractor.suitable) + '\n',
'class LazyLoadSearchExtractor(LazyLoadExtractor):\n pass\n']
ie_template = '''
class {name}({bases}):
_VALID_URL = {valid_url!r}
_module = '{module}'
'''
make_valid_template = '''
@classmethod
def _make_valid_url(cls):
return {valid_url!r}
'''
def get_base_name(base):
if base is InfoExtractor:
return 'LazyLoadExtractor'
elif base is SearchInfoExtractor:
return 'LazyLoadSearchExtractor'
else:
return base.__name__
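# Emit a minimal placeholder class for an extractor: only _VALID_URL, the
# defining module and, when overridden, suitable()/_make_valid_url() are kept,
# so the real extractor module is imported only when it is actually needed.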
def build_lazy_ie(ie, name):
valid_url = getattr(ie, '_VALID_URL', None)
s = ie_template.format(
name=name,
bases=', '.join(map(get_base_name, ie.__bases__)),
valid_url=valid_url,
module=ie.__module__)
if ie.suitable.__func__ is not InfoExtractor.suitable.__func__:
s += '\n' + getsource(ie.suitable)
if hasattr(ie, '_make_valid_url'):
# search extractors
s += make_valid_template.format(valid_url=ie._make_valid_url())
return s
# Find a definition order in which every base class precedes its subclasses,
# inserting any missing intermediate base classes so they can be created first.
classes = _ALL_CLASSES[:-1]
ordered_cls = []
while classes:
for c in classes[:]:
bases = set(c.__bases__) - set((object, InfoExtractor, SearchInfoExtractor))
stop = False
for b in bases:
if b not in classes and b not in ordered_cls:
if b.__name__ == 'GenericIE':
exit()
classes.insert(0, b)
stop = True
if stop:
break
if all(b in ordered_cls for b in bases):
ordered_cls.append(c)
classes.remove(c)
break
ordered_cls.append(_ALL_CLASSES[-1])
names = []
for ie in ordered_cls:
name = ie.__name__
src = build_lazy_ie(ie, name)
module_contents.append(src)
if ie in _ALL_CLASSES:
names.append(name)
module_contents.append(
'_ALL_CLASSES = [{0}]'.format(', '.join(names)))
module_src = '\n'.join(module_contents) + '\n'
with io.open(lazy_extractors_filename, 'wt', encoding='utf-8') as f:
f.write(module_src)
|
unlicense
|
f6134afd164c1a5b270e7ff5d20fc10b
| 27.72
| 110
| 0.629178
| 3.523926
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/tnaflix.py
|
20
|
12219
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
fix_xml_ampersands,
float_or_none,
int_or_none,
parse_duration,
str_to_int,
unescapeHTML,
xpath_text,
)
class TNAFlixNetworkBaseIE(InfoExtractor):
# May be overridden in descendants if necessary
_CONFIG_REGEX = [
r'flashvars\.config\s*=\s*escape\("(?P<url>[^"]+)"',
r'<input[^>]+name="config\d?" value="(?P<url>[^"]+)"',
r'config\s*=\s*(["\'])(?P<url>(?:https?:)?//(?:(?!\1).)+)\1',
]
_HOST = 'tna'
_VKEY_SUFFIX = ''
_TITLE_REGEX = r'<input[^>]+name="title" value="([^"]+)"'
_DESCRIPTION_REGEX = r'<input[^>]+name="description" value="([^"]+)"'
_UPLOADER_REGEX = r'<input[^>]+name="username" value="([^"]+)"'
_VIEW_COUNT_REGEX = None
_COMMENT_COUNT_REGEX = None
_AVERAGE_RATING_REGEX = None
_CATEGORIES_REGEX = r'<li[^>]*>\s*<span[^>]+class="infoTitle"[^>]*>Categories:</span>\s*<span[^>]+class="listView"[^>]*>(.+?)</span>\s*</li>'
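    # The config XML describes thumbnails as a URL pattern containing a '#'
    # placeholder plus first/last frame indices; expand it into one URL per frame.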
def _extract_thumbnails(self, flix_xml):
def get_child(elem, names):
for name in names:
child = elem.find(name)
if child is not None:
return child
timeline = get_child(flix_xml, ['timeline', 'rolloverBarImage'])
if timeline is None:
return
pattern_el = get_child(timeline, ['imagePattern', 'pattern'])
if pattern_el is None or not pattern_el.text:
return
first_el = get_child(timeline, ['imageFirst', 'first'])
last_el = get_child(timeline, ['imageLast', 'last'])
if first_el is None or last_el is None:
return
first_text = first_el.text
last_text = last_el.text
if not first_text.isdigit() or not last_text.isdigit():
return
first = int(first_text)
last = int(last_text)
if first > last:
return
width = int_or_none(xpath_text(timeline, './imageWidth', 'thumbnail width'))
height = int_or_none(xpath_text(timeline, './imageHeight', 'thumbnail height'))
return [{
'url': self._proto_relative_url(pattern_el.text.replace('#', compat_str(i)), 'http:'),
'width': width,
'height': height,
} for i in range(first, last + 1)]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
for display_id_key in ('display_id', 'display_id_2'):
if display_id_key in mobj.groupdict():
display_id = mobj.group(display_id_key)
if display_id:
break
else:
display_id = video_id
webpage = self._download_webpage(url, display_id)
cfg_url = self._proto_relative_url(self._html_search_regex(
self._CONFIG_REGEX, webpage, 'flashvars.config', default=None,
group='url'), 'http:')
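        # If the page does not expose the config URL, rebuild it from the hidden
        # vkey/nkey form inputs instead.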
if not cfg_url:
inputs = self._hidden_inputs(webpage)
cfg_url = ('https://cdn-fck.%sflix.com/%sflix/%s%s.fid?key=%s&VID=%s&premium=1&vip=1&alpha'
% (self._HOST, self._HOST, inputs['vkey'], self._VKEY_SUFFIX, inputs['nkey'], video_id))
cfg_xml = self._download_xml(
cfg_url, display_id, 'Downloading metadata',
transform_source=fix_xml_ampersands, headers={'Referer': url})
formats = []
def extract_video_url(vl):
# Any URL modification now results in HTTP Error 403: Forbidden
return unescapeHTML(vl.text)
video_link = cfg_xml.find('./videoLink')
if video_link is not None:
formats.append({
'url': extract_video_url(video_link),
'ext': xpath_text(cfg_xml, './videoConfig/type', 'type', default='flv'),
})
for item in cfg_xml.findall('./quality/item'):
video_link = item.find('./videoLink')
if video_link is None:
continue
res = item.find('res')
format_id = None if res is None else res.text
height = int_or_none(self._search_regex(
r'^(\d+)[pP]', format_id, 'height', default=None))
formats.append({
'url': self._proto_relative_url(extract_video_url(video_link), 'http:'),
'format_id': format_id,
'height': height,
})
self._sort_formats(formats)
thumbnail = self._proto_relative_url(
xpath_text(cfg_xml, './startThumb', 'thumbnail'), 'http:')
thumbnails = self._extract_thumbnails(cfg_xml)
title = None
if self._TITLE_REGEX:
title = self._html_search_regex(
self._TITLE_REGEX, webpage, 'title', default=None)
if not title:
title = self._og_search_title(webpage)
age_limit = self._rta_search(webpage) or 18
duration = parse_duration(self._html_search_meta(
'duration', webpage, 'duration', default=None))
def extract_field(pattern, name):
return self._html_search_regex(pattern, webpage, name, default=None) if pattern else None
description = extract_field(self._DESCRIPTION_REGEX, 'description')
uploader = extract_field(self._UPLOADER_REGEX, 'uploader')
view_count = str_to_int(extract_field(self._VIEW_COUNT_REGEX, 'view count'))
comment_count = str_to_int(extract_field(self._COMMENT_COUNT_REGEX, 'comment count'))
average_rating = float_or_none(extract_field(self._AVERAGE_RATING_REGEX, 'average rating'))
categories_str = extract_field(self._CATEGORIES_REGEX, 'categories')
categories = [c.strip() for c in categories_str.split(',')] if categories_str is not None else []
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'thumbnails': thumbnails,
'duration': duration,
'age_limit': age_limit,
'uploader': uploader,
'view_count': view_count,
'comment_count': comment_count,
'average_rating': average_rating,
'categories': categories,
'formats': formats,
}
class TNAFlixNetworkEmbedIE(TNAFlixNetworkBaseIE):
_VALID_URL = r'https?://player\.(?:tna|emp)flix\.com/video/(?P<id>\d+)'
_TITLE_REGEX = r'<title>([^<]+)</title>'
_TESTS = [{
'url': 'https://player.tnaflix.com/video/6538',
'info_dict': {
'id': '6538',
'display_id': '6538',
'ext': 'mp4',
'title': 'Educational xxx video',
'thumbnail': r're:https?://.*\.jpg$',
'age_limit': 18,
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://player.empflix.com/video/33051',
'only_matching': True,
}]
@staticmethod
def _extract_urls(webpage):
return [url for _, url in re.findall(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//player\.(?:tna|emp)flix\.com/video/\d+)\1',
webpage)]
class TNAEMPFlixBaseIE(TNAFlixNetworkBaseIE):
_DESCRIPTION_REGEX = r'(?s)>Description:</[^>]+>(.+?)<'
_UPLOADER_REGEX = r'<span>by\s*<a[^>]+\bhref=["\']/profile/[^>]+>([^<]+)<'
_CATEGORIES_REGEX = r'(?s)<span[^>]*>Categories:</span>(.+?)</div>'
class TNAFlixIE(TNAEMPFlixBaseIE):
_VALID_URL = r'https?://(?:www\.)?tnaflix\.com/[^/]+/(?P<display_id>[^/]+)/video(?P<id>\d+)'
_TITLE_REGEX = r'<title>(.+?) - (?:TNAFlix Porn Videos|TNAFlix\.com)</title>'
_TESTS = [{
# anonymous uploader, no categories
'url': 'http://www.tnaflix.com/porn-stars/Carmella-Decesare-striptease/video553878',
'md5': '7e569419fe6d69543d01e6be22f5f7c4',
'info_dict': {
'id': '553878',
'display_id': 'Carmella-Decesare-striptease',
'ext': 'mp4',
'title': 'Carmella Decesare - striptease',
'thumbnail': r're:https?://.*\.jpg$',
'duration': 91,
'age_limit': 18,
'categories': ['Porn Stars'],
}
}, {
# non-anonymous uploader, categories
'url': 'https://www.tnaflix.com/teen-porn/Educational-xxx-video/video6538',
'md5': '0f5d4d490dbfd117b8607054248a07c0',
'info_dict': {
'id': '6538',
'display_id': 'Educational-xxx-video',
'ext': 'mp4',
'title': 'Educational xxx video',
'description': 'md5:b4fab8f88a8621c8fabd361a173fe5b8',
'thumbnail': r're:https?://.*\.jpg$',
'duration': 164,
'age_limit': 18,
'uploader': 'bobwhite39',
'categories': list,
}
}, {
'url': 'https://www.tnaflix.com/amateur-porn/bunzHD-Ms.Donk/video358632',
'only_matching': True,
}]
class EMPFlixIE(TNAEMPFlixBaseIE):
_VALID_URL = r'https?://(?:www\.)?empflix\.com/(?:videos/(?P<display_id>.+?)-|[^/]+/(?P<display_id_2>[^/]+)/video)(?P<id>[0-9]+)'
_HOST = 'emp'
_VKEY_SUFFIX = '-1'
_TESTS = [{
'url': 'http://www.empflix.com/videos/Amateur-Finger-Fuck-33051.html',
'md5': 'bc30d48b91a7179448a0bda465114676',
'info_dict': {
'id': '33051',
'display_id': 'Amateur-Finger-Fuck',
'ext': 'mp4',
'title': 'Amateur Finger Fuck',
'description': 'Amateur solo finger fucking.',
'thumbnail': r're:https?://.*\.jpg$',
'duration': 83,
'age_limit': 18,
'uploader': 'cwbike',
'categories': ['Amateur', 'Anal', 'Fisting', 'Home made', 'Solo'],
}
}, {
'url': 'http://www.empflix.com/videos/[AROMA][ARMD-718]-Aoi-Yoshino-Sawa-25826.html',
'only_matching': True,
}, {
'url': 'https://www.empflix.com/amateur-porn/Amateur-Finger-Fuck/video33051',
'only_matching': True,
}]
class MovieFapIE(TNAFlixNetworkBaseIE):
_VALID_URL = r'https?://(?:www\.)?moviefap\.com/videos/(?P<id>[0-9a-f]+)/(?P<display_id>[^/]+)\.html'
_VIEW_COUNT_REGEX = r'<br>Views\s*<strong>([\d,.]+)</strong>'
_COMMENT_COUNT_REGEX = r'<span[^>]+id="comCount"[^>]*>([\d,.]+)</span>'
_AVERAGE_RATING_REGEX = r'Current Rating\s*<br>\s*<strong>([\d.]+)</strong>'
_CATEGORIES_REGEX = r'(?s)<div[^>]+id="vid_info"[^>]*>\s*<div[^>]*>.+?</div>(.*?)<br>'
_TESTS = [{
# normal, multi-format video
'url': 'http://www.moviefap.com/videos/be9867c9416c19f54a4a/experienced-milf-amazing-handjob.html',
'md5': '26624b4e2523051b550067d547615906',
'info_dict': {
'id': 'be9867c9416c19f54a4a',
'display_id': 'experienced-milf-amazing-handjob',
'ext': 'mp4',
'title': 'Experienced MILF Amazing Handjob',
'description': 'Experienced MILF giving an Amazing Handjob',
'thumbnail': r're:https?://.*\.jpg$',
'age_limit': 18,
'uploader': 'darvinfred06',
'view_count': int,
'comment_count': int,
'average_rating': float,
'categories': ['Amateur', 'Masturbation', 'Mature', 'Flashing'],
}
}, {
# quirky single-format case where the extension is given as fid, but the video is really an flv
'url': 'http://www.moviefap.com/videos/e5da0d3edce5404418f5/jeune-couple-russe.html',
'md5': 'fa56683e291fc80635907168a743c9ad',
'info_dict': {
'id': 'e5da0d3edce5404418f5',
'display_id': 'jeune-couple-russe',
'ext': 'flv',
'title': 'Jeune Couple Russe',
'description': 'Amateur',
'thumbnail': r're:https?://.*\.jpg$',
'age_limit': 18,
'uploader': 'whiskeyjar',
'view_count': int,
'comment_count': int,
'average_rating': float,
'categories': ['Amateur', 'Teen'],
}
}]
|
unlicense
|
d48ce5df7e4404bd45505f29d3f6f0a9
| 36.366972
| 145
| 0.536214
| 3.401726
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/facebook.py
|
3
|
30065
|
# coding: utf-8
from __future__ import unicode_literals
import json
import re
import socket
from .common import InfoExtractor
from ..compat import (
compat_etree_fromstring,
compat_http_client,
compat_str,
compat_urllib_error,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
)
from ..utils import (
clean_html,
error_to_compat_str,
ExtractorError,
float_or_none,
get_element_by_id,
int_or_none,
js_to_json,
limit_length,
parse_count,
qualities,
sanitized_Request,
try_get,
urlencode_postdata,
urljoin,
)
class FacebookIE(InfoExtractor):
_VALID_URL = r'''(?x)
(?:
https?://
(?:[\w-]+\.)?(?:facebook\.com|facebookcorewwwi\.onion)/
(?:[^#]*?\#!/)?
(?:
(?:
video/video\.php|
photo\.php|
video\.php|
video/embed|
story\.php|
watch(?:/live)?/?
)\?(?:.*?)(?:v|video_id|story_fbid)=|
[^/]+/videos/(?:[^/]+/)?|
[^/]+/posts/|
groups/[^/]+/permalink/|
watchparty/
)|
facebook:
)
(?P<id>[0-9]+)
'''
_LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
_CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
_NETRC_MACHINE = 'facebook'
IE_NAME = 'facebook'
_VIDEO_PAGE_TEMPLATE = 'https://www.facebook.com/video/video.php?v=%s'
_VIDEO_PAGE_TAHOE_TEMPLATE = 'https://www.facebook.com/video/tahoe/async/%s/?chain=true&isvideo=true&payloadtype=primary'
_TESTS = [{
'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf',
'md5': '6a40d33c0eccbb1af76cf0485a052659',
'info_dict': {
'id': '637842556329505',
'ext': 'mp4',
'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
'uploader': 'Tennis on Facebook',
'upload_date': '20140908',
'timestamp': 1410199200,
},
'skip': 'Requires logging in',
}, {
# data.video
'url': 'https://www.facebook.com/video.php?v=274175099429670',
'info_dict': {
'id': '274175099429670',
'ext': 'mp4',
'title': 're:^Asif Nawab Butt posted a video',
'uploader': 'Asif Nawab Butt',
'upload_date': '20140506',
'timestamp': 1399398998,
'thumbnail': r're:^https?://.*',
},
'expected_warnings': [
'title'
]
}, {
'note': 'Video with DASH manifest',
'url': 'https://www.facebook.com/video.php?v=957955867617029',
'md5': 'b2c28d528273b323abe5c6ab59f0f030',
'info_dict': {
'id': '957955867617029',
'ext': 'mp4',
'title': 'When you post epic content on instagram.com/433 8 million followers, this is ...',
'uploader': 'Demy de Zeeuw',
'upload_date': '20160110',
'timestamp': 1452431627,
},
'skip': 'Requires logging in',
}, {
'url': 'https://www.facebook.com/maxlayn/posts/10153807558977570',
'md5': '037b1fa7f3c2d02b7a0d7bc16031ecc6',
'info_dict': {
'id': '544765982287235',
'ext': 'mp4',
'title': '"What are you doing running in the snow?"',
'uploader': 'FailArmy',
},
'skip': 'Video gone',
}, {
'url': 'https://m.facebook.com/story.php?story_fbid=1035862816472149&id=116132035111903',
'md5': '1deb90b6ac27f7efcf6d747c8a27f5e3',
'info_dict': {
'id': '1035862816472149',
'ext': 'mp4',
'title': 'What the Flock Is Going On In New Zealand Credit: ViralHog',
'uploader': 'S. Saint',
},
'skip': 'Video gone',
}, {
'note': 'swf params escaped',
'url': 'https://www.facebook.com/barackobama/posts/10153664894881749',
'md5': '97ba073838964d12c70566e0085c2b91',
'info_dict': {
'id': '10153664894881749',
'ext': 'mp4',
'title': 'Average time to confirm recent Supreme Court nominees: 67 days Longest it\'s t...',
'thumbnail': r're:^https?://.*',
'timestamp': 1456259628,
'upload_date': '20160223',
'uploader': 'Barack Obama',
},
}, {
        # 1080p is available, but the swf params only expose up to 720p
# data.video.story.attachments[].media
'url': 'https://www.facebook.com/cnn/videos/10155529876156509/',
'md5': '9571fae53d4165bbbadb17a94651dcdc',
'info_dict': {
'id': '10155529876156509',
'ext': 'mp4',
'title': 'She survived the holocaust — and years later, she’s getting her citizenship s...',
'timestamp': 1477818095,
'upload_date': '20161030',
'uploader': 'CNN',
'thumbnail': r're:^https?://.*',
'view_count': int,
},
}, {
# bigPipe.onPageletArrive ... onPageletArrive pagelet_group_mall
# data.node.comet_sections.content.story.attachments[].style_type_renderer.attachment.media
'url': 'https://www.facebook.com/yaroslav.korpan/videos/1417995061575415/',
'info_dict': {
'id': '1417995061575415',
'ext': 'mp4',
'title': 'md5:1db063d6a8c13faa8da727817339c857',
'timestamp': 1486648217,
'upload_date': '20170209',
'uploader': 'Yaroslav Korpan',
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.facebook.com/LaGuiaDelVaron/posts/1072691702860471',
'info_dict': {
'id': '1072691702860471',
'ext': 'mp4',
'title': 'md5:ae2d22a93fbb12dad20dc393a869739d',
'timestamp': 1477305000,
'upload_date': '20161024',
'uploader': 'La Guía Del Varón',
'thumbnail': r're:^https?://.*',
},
'params': {
'skip_download': True,
},
}, {
# data.node.comet_sections.content.story.attachments[].style_type_renderer.attachment.media
'url': 'https://www.facebook.com/groups/1024490957622648/permalink/1396382447100162/',
'info_dict': {
'id': '1396382447100162',
'ext': 'mp4',
'title': 'md5:19a428bbde91364e3de815383b54a235',
'timestamp': 1486035494,
'upload_date': '20170202',
'uploader': 'Elisabeth Ahtn',
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.facebook.com/video.php?v=10204634152394104',
'only_matching': True,
}, {
'url': 'https://www.facebook.com/amogood/videos/1618742068337349/?fref=nf',
'only_matching': True,
}, {
# data.mediaset.currMedia.edges
'url': 'https://www.facebook.com/ChristyClarkForBC/videos/vb.22819070941/10153870694020942/?type=2&theater',
'only_matching': True,
}, {
# data.video.story.attachments[].media
'url': 'facebook:544765982287235',
'only_matching': True,
}, {
# data.node.comet_sections.content.story.attachments[].style_type_renderer.attachment.media
'url': 'https://www.facebook.com/groups/164828000315060/permalink/764967300301124/',
'only_matching': True,
}, {
# data.video.creation_story.attachments[].media
'url': 'https://zh-hk.facebook.com/peoplespower/videos/1135894589806027/',
'only_matching': True,
}, {
# data.video
'url': 'https://www.facebookcorewwwi.onion/video.php?v=274175099429670',
'only_matching': True,
}, {
# no title
'url': 'https://www.facebook.com/onlycleverentertainment/videos/1947995502095005/',
'only_matching': True,
}, {
# data.video
'url': 'https://www.facebook.com/WatchESLOne/videos/359649331226507/',
'info_dict': {
'id': '359649331226507',
'ext': 'mp4',
'title': '#ESLOne VoD - Birmingham Finals Day#1 Fnatic vs. @Evil Geniuses',
'uploader': 'ESL One Dota 2',
},
'params': {
'skip_download': True,
},
}, {
# data.node.comet_sections.content.story.attachments[].style_type_renderer.attachment.all_subattachments.nodes[].media
'url': 'https://www.facebook.com/100033620354545/videos/106560053808006/',
'info_dict': {
'id': '106560053808006',
},
'playlist_count': 2,
}, {
# data.video.story.attachments[].media
'url': 'https://www.facebook.com/watch/?v=647537299265662',
'only_matching': True,
}, {
# data.node.comet_sections.content.story.attachments[].style_type_renderer.attachment.all_subattachments.nodes[].media
'url': 'https://www.facebook.com/PankajShahLondon/posts/10157667649866271',
'info_dict': {
'id': '10157667649866271',
},
'playlist_count': 3,
}, {
# data.nodes[].comet_sections.content.story.attachments[].style_type_renderer.attachment.media
'url': 'https://m.facebook.com/Alliance.Police.Department/posts/4048563708499330',
'info_dict': {
'id': '117576630041613',
'ext': 'mp4',
# TODO: title can be extracted from video page
'title': 'Facebook video #117576630041613',
'uploader_id': '189393014416438',
'upload_date': '20201123',
'timestamp': 1606162592,
},
'skip': 'Requires logging in',
}, {
# node.comet_sections.content.story.attached_story.attachments.style_type_renderer.attachment.media
'url': 'https://www.facebook.com/groups/ateistiskselskab/permalink/10154930137678856/',
'info_dict': {
'id': '211567722618337',
'ext': 'mp4',
'title': 'Facebook video #211567722618337',
'uploader_id': '127875227654254',
'upload_date': '20161122',
'timestamp': 1479793574,
},
}, {
# data.video.creation_story.attachments[].media
'url': 'https://www.facebook.com/watch/live/?v=1823658634322275',
'only_matching': True,
}, {
'url': 'https://www.facebook.com/watchparty/211641140192478',
'info_dict': {
'id': '211641140192478',
},
'playlist_count': 1,
'skip': 'Requires logging in',
}]
_SUPPORTED_PAGLETS_REGEX = r'(?:pagelet_group_mall|permalink_video_pagelet|hyperfeed_story_id_[0-9a-f]+)'
_api_config = {
'graphURI': '/api/graphql/'
}
@staticmethod
def _extract_urls(webpage):
urls = []
for mobj in re.finditer(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://www\.facebook\.com/(?:video/embed|plugins/video\.php).+?)\1',
webpage):
urls.append(mobj.group('url'))
# Facebook API embed
# see https://developers.facebook.com/docs/plugins/embedded-video-player
for mobj in re.finditer(r'''(?x)<div[^>]+
class=(?P<q1>[\'"])[^\'"]*\bfb-(?:video|post)\b[^\'"]*(?P=q1)[^>]+
data-href=(?P<q2>[\'"])(?P<url>(?:https?:)?//(?:www\.)?facebook.com/.+?)(?P=q2)''', webpage):
urls.append(mobj.group('url'))
return urls
def _login(self):
useremail, password = self._get_login_info()
if useremail is None:
return
login_page_req = sanitized_Request(self._LOGIN_URL)
self._set_cookie('facebook.com', 'locale', 'en_US')
login_page = self._download_webpage(login_page_req, None,
note='Downloading login page',
errnote='Unable to download login page')
lsd = self._search_regex(
r'<input type="hidden" name="lsd" value="([^"]*)"',
login_page, 'lsd')
lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd')
login_form = {
'email': useremail,
'pass': password,
'lsd': lsd,
'lgnrnd': lgnrnd,
'next': 'http://facebook.com/home.php',
'default_persistent': '0',
'legacy_return': '1',
'timezone': '-60',
'trynum': '1',
}
request = sanitized_Request(self._LOGIN_URL, urlencode_postdata(login_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
try:
login_results = self._download_webpage(request, None,
note='Logging in', errnote='unable to fetch login page')
if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
error = self._html_search_regex(
r'(?s)<div[^>]+class=(["\']).*?login_error_box.*?\1[^>]*><div[^>]*>.*?</div><div[^>]*>(?P<error>.+?)</div>',
login_results, 'login error', default=None, group='error')
if error:
raise ExtractorError('Unable to login: %s' % error, expected=True)
self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
return
fb_dtsg = self._search_regex(
r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg', default=None)
h = self._search_regex(
r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h', default=None)
if not fb_dtsg or not h:
return
check_form = {
'fb_dtsg': fb_dtsg,
'h': h,
'name_action_selected': 'dont_save',
}
check_req = sanitized_Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
check_response = self._download_webpage(check_req, None,
note='Confirming login')
if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
self._downloader.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.report_warning('unable to log in: %s' % error_to_compat_str(err))
return
def _real_initialize(self):
self._login()
def _extract_from_url(self, url, video_id):
webpage = self._download_webpage(
url.replace('://m.facebook.com/', '://www.facebook.com/'), video_id)
video_data = None
def extract_video_data(instances):
video_data = []
for item in instances:
if try_get(item, lambda x: x[1][0]) == 'VideoConfig':
video_item = item[2][0]
if video_item.get('video_id'):
video_data.append(video_item['videoData'])
return video_data
server_js_data = self._parse_json(self._search_regex(
[r'handleServerJS\(({.+})(?:\);|,")', r'\bs\.handle\(({.+?})\);'],
webpage, 'server js data', default='{}'), video_id, fatal=False)
if server_js_data:
video_data = extract_video_data(server_js_data.get('instances', []))
def extract_from_jsmods_instances(js_data):
if js_data:
return extract_video_data(try_get(
js_data, lambda x: x['jsmods']['instances'], list) or [])
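        # A DASH manifest, when present, is embedded as a URL-encoded XML string;
        # decode it and hand it to the generic MPD parser.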
def extract_dash_manifest(video, formats):
dash_manifest = video.get('dash_manifest')
if dash_manifest:
formats.extend(self._parse_mpd_formats(
compat_etree_fromstring(compat_urllib_parse_unquote_plus(dash_manifest))))
def process_formats(formats):
            # Downloads with a browser User-Agent are rate limited; work around
            # this by sending a non-browser User-Agent instead.
for f in formats:
f.setdefault('http_headers', {})['User-Agent'] = 'facebookexternalhit/1.1'
self._sort_formats(formats)
def extract_relay_data(_filter):
return self._parse_json(self._search_regex(
r'handleWithCustomApplyEach\([^,]+,\s*({.*?%s.*?})\);' % _filter,
webpage, 'replay data', default='{}'), video_id, fatal=False) or {}
def extract_relay_prefetched_data(_filter):
replay_data = extract_relay_data(_filter)
for require in (replay_data.get('require') or []):
if require[0] == 'RelayPrefetchedStreamCache':
return try_get(require, lambda x: x[3][1]['__bbox']['result']['data'], dict) or {}
if not video_data:
server_js_data = self._parse_json(self._search_regex([
r'bigPipe\.onPageletArrive\(({.+?})\)\s*;\s*}\s*\)\s*,\s*["\']onPageletArrive\s+' + self._SUPPORTED_PAGLETS_REGEX,
r'bigPipe\.onPageletArrive\(({.*?id\s*:\s*"%s".*?})\);' % self._SUPPORTED_PAGLETS_REGEX
], webpage, 'js data', default='{}'), video_id, js_to_json, False)
video_data = extract_from_jsmods_instances(server_js_data)
if not video_data:
data = extract_relay_prefetched_data(
r'"(?:dash_manifest|playable_url(?:_quality_hd)?)"\s*:\s*"[^"]+"')
if data:
entries = []
def parse_graphql_video(video):
formats = []
q = qualities(['sd', 'hd'])
for (suffix, format_id) in [('', 'sd'), ('_quality_hd', 'hd')]:
playable_url = video.get('playable_url' + suffix)
if not playable_url:
continue
formats.append({
'format_id': format_id,
'quality': q(format_id),
'url': playable_url,
})
extract_dash_manifest(video, formats)
process_formats(formats)
v_id = video.get('videoId') or video.get('id') or video_id
info = {
'id': v_id,
'formats': formats,
'thumbnail': try_get(video, lambda x: x['thumbnailImage']['uri']),
'uploader_id': try_get(video, lambda x: x['owner']['id']),
'timestamp': int_or_none(video.get('publish_time')),
'duration': float_or_none(video.get('playable_duration_in_ms'), 1000),
}
description = try_get(video, lambda x: x['savable_description']['text'])
title = video.get('name')
if title:
info.update({
'title': title,
'description': description,
})
else:
info['title'] = description or 'Facebook video #%s' % v_id
entries.append(info)
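                # Only attachments whose media node is a GraphQL Video are usable.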
def parse_attachment(attachment, key='media'):
media = attachment.get(key) or {}
if media.get('__typename') == 'Video':
return parse_graphql_video(media)
nodes = data.get('nodes') or []
node = data.get('node') or {}
if not nodes and node:
nodes.append(node)
for node in nodes:
story = try_get(node, lambda x: x['comet_sections']['content']['story'], dict) or {}
attachments = try_get(story, [
lambda x: x['attached_story']['attachments'],
lambda x: x['attachments']
], list) or []
for attachment in attachments:
attachment = try_get(attachment, lambda x: x['style_type_renderer']['attachment'], dict)
ns = try_get(attachment, lambda x: x['all_subattachments']['nodes'], list) or []
for n in ns:
parse_attachment(n)
parse_attachment(attachment)
edges = try_get(data, lambda x: x['mediaset']['currMedia']['edges'], list) or []
for edge in edges:
parse_attachment(edge, key='node')
video = data.get('video') or {}
if video:
attachments = try_get(video, [
lambda x: x['story']['attachments'],
lambda x: x['creation_story']['attachments']
], list) or []
for attachment in attachments:
parse_attachment(attachment)
if not entries:
parse_graphql_video(video)
return self.playlist_result(entries, video_id)
if not video_data:
m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
if m_msg is not None:
raise ExtractorError(
'The video is not available, Facebook said: "%s"' % m_msg.group(1),
expected=True)
elif '>You must log in to continue' in webpage:
self.raise_login_required()
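        # Watch party pages list the watched videos behind a GraphQL living_room
        # query; turn each entry into a regular video page URL.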
if not video_data and '/watchparty/' in url:
post_data = {
'doc_id': 3731964053542869,
'variables': json.dumps({
'livingRoomID': video_id,
}),
}
prefetched_data = extract_relay_prefetched_data(r'"login_data"\s*:\s*{')
if prefetched_data:
lsd = try_get(prefetched_data, lambda x: x['login_data']['lsd'], dict)
if lsd:
post_data[lsd['name']] = lsd['value']
relay_data = extract_relay_data(r'\[\s*"RelayAPIConfigDefaults"\s*,')
for define in (relay_data.get('define') or []):
if define[0] == 'RelayAPIConfigDefaults':
self._api_config = define[2]
living_room = self._download_json(
urljoin(url, self._api_config['graphURI']), video_id,
data=urlencode_postdata(post_data))['data']['living_room']
entries = []
for edge in (try_get(living_room, lambda x: x['recap']['watched_content']['edges']) or []):
video = try_get(edge, lambda x: x['node']['video']) or {}
v_id = video.get('id')
if not v_id:
continue
v_id = compat_str(v_id)
entries.append(self.url_result(
self._VIDEO_PAGE_TEMPLATE % v_id,
self.ie_key(), v_id, video.get('name')))
return self.playlist_result(entries, video_id)
if not video_data:
            # Video info was not in the first response; make a secondary request
            # to the tahoe-player-specific URL.
tahoe_data = self._download_webpage(
self._VIDEO_PAGE_TAHOE_TEMPLATE % video_id, video_id,
data=urlencode_postdata({
'__a': 1,
'__pc': self._search_regex(
r'pkg_cohort["\']\s*:\s*["\'](.+?)["\']', webpage,
'pkg cohort', default='PHASED:DEFAULT'),
'__rev': self._search_regex(
r'client_revision["\']\s*:\s*(\d+),', webpage,
'client revision', default='3944515'),
'fb_dtsg': self._search_regex(
r'"DTSGInitialData"\s*,\s*\[\]\s*,\s*{\s*"token"\s*:\s*"([^"]+)"',
webpage, 'dtsg token', default=''),
}),
headers={
'Content-Type': 'application/x-www-form-urlencoded',
})
tahoe_js_data = self._parse_json(
self._search_regex(
r'for\s+\(\s*;\s*;\s*\)\s*;(.+)', tahoe_data,
'tahoe js data', default='{}'),
video_id, fatal=False)
video_data = extract_from_jsmods_instances(tahoe_js_data)
if not video_data:
raise ExtractorError('Cannot parse data')
if len(video_data) > 1:
entries = []
for v in video_data:
video_url = v[0].get('video_url')
if not video_url:
continue
entries.append(self.url_result(urljoin(
url, video_url), self.ie_key(), v[0].get('video_id')))
return self.playlist_result(entries, video_id)
video_data = video_data[0]
formats = []
subtitles = {}
for f in video_data:
format_id = f['stream_type']
if f and isinstance(f, dict):
f = [f]
if not f or not isinstance(f, list):
continue
for quality in ('sd', 'hd'):
for src_type in ('src', 'src_no_ratelimit'):
src = f[0].get('%s_%s' % (quality, src_type))
if src:
preference = -10 if format_id == 'progressive' else 0
if quality == 'hd':
preference += 5
formats.append({
'format_id': '%s_%s_%s' % (format_id, quality, src_type),
'url': src,
'preference': preference,
})
extract_dash_manifest(f[0], formats)
subtitles_src = f[0].get('subtitles_src')
if subtitles_src:
subtitles.setdefault('en', []).append({'url': subtitles_src})
if not formats:
raise ExtractorError('Cannot find video formats')
process_formats(formats)
video_title = self._html_search_regex(
r'<h2\s+[^>]*class="uiHeaderTitle"[^>]*>([^<]*)</h2>', webpage,
'title', default=None)
if not video_title:
video_title = self._html_search_regex(
r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
webpage, 'alternative title', default=None)
if not video_title:
video_title = self._html_search_meta(
'description', webpage, 'title', default=None)
if video_title:
video_title = limit_length(video_title, 80)
else:
video_title = 'Facebook video #%s' % video_id
uploader = clean_html(get_element_by_id(
'fbPhotoPageAuthorName', webpage)) or self._search_regex(
r'ownerName\s*:\s*"([^"]+)"', webpage, 'uploader',
default=None) or self._og_search_title(webpage, fatal=False)
timestamp = int_or_none(self._search_regex(
r'<abbr[^>]+data-utime=["\'](\d+)', webpage,
'timestamp', default=None))
thumbnail = self._html_search_meta(['og:image', 'twitter:image'], webpage)
view_count = parse_count(self._search_regex(
r'\bviewCount\s*:\s*["\']([\d,.]+)', webpage, 'view count',
default=None))
info_dict = {
'id': video_id,
'title': video_title,
'formats': formats,
'uploader': uploader,
'timestamp': timestamp,
'thumbnail': thumbnail,
'view_count': view_count,
'subtitles': subtitles,
}
return info_dict
def _real_extract(self, url):
video_id = self._match_id(url)
real_url = self._VIDEO_PAGE_TEMPLATE % video_id if url.startswith('facebook:') else url
return self._extract_from_url(real_url, video_id)
class FacebookPluginsVideoIE(InfoExtractor):
_VALID_URL = r'https?://(?:[\w-]+\.)?facebook\.com/plugins/video\.php\?.*?\bhref=(?P<id>https.+)'
_TESTS = [{
'url': 'https://www.facebook.com/plugins/video.php?href=https%3A%2F%2Fwww.facebook.com%2Fgov.sg%2Fvideos%2F10154383743583686%2F&show_text=0&width=560',
'md5': '5954e92cdfe51fe5782ae9bda7058a07',
'info_dict': {
'id': '10154383743583686',
'ext': 'mp4',
'title': 'What to do during the haze?',
'uploader': 'Gov.sg',
'upload_date': '20160826',
'timestamp': 1472184808,
},
'add_ie': [FacebookIE.ie_key()],
}, {
'url': 'https://www.facebook.com/plugins/video.php?href=https%3A%2F%2Fwww.facebook.com%2Fvideo.php%3Fv%3D10204634152394104',
'only_matching': True,
}, {
'url': 'https://www.facebook.com/plugins/video.php?href=https://www.facebook.com/gov.sg/videos/10154383743583686/&show_text=0&width=560',
'only_matching': True,
}]
def _real_extract(self, url):
return self.url_result(
compat_urllib_parse_unquote(self._match_id(url)),
FacebookIE.ie_key())
|
unlicense
|
baa6bd7c063efc93d26c60d68a8780ac
| 41.576487
| 159
| 0.50341
| 3.738217
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/techtalks.py
|
71
|
2529
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
get_element_by_attribute,
clean_html,
)
class TechTalksIE(InfoExtractor):
_VALID_URL = r'https?://techtalks\.tv/talks/(?:[^/]+/)?(?P<id>\d+)'
_TESTS = [{
'url': 'http://techtalks.tv/talks/learning-topic-models-going-beyond-svd/57758/',
'info_dict': {
'id': '57758',
'title': 'Learning Topic Models --- Going beyond SVD',
},
'playlist': [
{
'info_dict': {
'id': '57758',
'ext': 'flv',
'title': 'Learning Topic Models --- Going beyond SVD',
},
},
{
'info_dict': {
'id': '57758-slides',
'ext': 'flv',
'title': 'Learning Topic Models --- Going beyond SVD',
},
},
],
'params': {
# rtmp download
'skip_download': True,
},
}, {
'url': 'http://techtalks.tv/talks/57758',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
talk_id = mobj.group('id')
webpage = self._download_webpage(url, talk_id)
rtmp_url = self._search_regex(
r'netConnectionUrl: \'(.*?)\'', webpage, 'rtmp url')
play_path = self._search_regex(
r'href=\'(.*?)\' [^>]*id="flowplayer_presenter"',
webpage, 'presenter play path')
title = clean_html(get_element_by_attribute('class', 'title', webpage))
video_info = {
'id': talk_id,
'title': title,
'url': rtmp_url,
'play_path': play_path,
'ext': 'flv',
}
m_slides = re.search(r'<a class="slides" href=\'(.*?)\'', webpage)
if m_slides is None:
return video_info
else:
return {
'_type': 'playlist',
'id': talk_id,
'title': title,
'entries': [
video_info,
# The slides video
{
'id': talk_id + '-slides',
'title': title,
'url': rtmp_url,
'play_path': m_slides.group(1),
'ext': 'flv',
},
],
}
|
unlicense
|
182456187c793bdb8d48ff27ac915ab9
| 29.841463
| 89
| 0.411625
| 4.020668
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/aliexpress.py
|
36
|
1581
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
float_or_none,
try_get,
)
class AliExpressLiveIE(InfoExtractor):
_VALID_URL = r'https?://live\.aliexpress\.com/live/(?P<id>\d+)'
_TEST = {
'url': 'https://live.aliexpress.com/live/2800002704436634',
'md5': 'e729e25d47c5e557f2630eaf99b740a5',
'info_dict': {
'id': '2800002704436634',
'ext': 'mp4',
'title': 'CASIMA7.22',
'thumbnail': r're:http://.*\.jpg',
'uploader': 'CASIMA Official Store',
'timestamp': 1500717600,
'upload_date': '20170722',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
data = self._parse_json(
self._search_regex(
r'(?s)runParams\s*=\s*({.+?})\s*;?\s*var',
webpage, 'runParams'),
video_id)
title = data['title']
formats = self._extract_m3u8_formats(
data['replyStreamUrl'], video_id, 'mp4',
entry_protocol='m3u8_native', m3u8_id='hls')
return {
'id': video_id,
'title': title,
'thumbnail': data.get('coverUrl'),
'uploader': try_get(
data, lambda x: x['followBar']['name'], compat_str),
'timestamp': float_or_none(data.get('startTimeLong'), scale=1000),
'formats': formats,
}
|
unlicense
|
9e5c67b1bbb1037fc56652af457fefc0
| 28.830189
| 78
| 0.523087
| 3.444444
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/spotify.py
|
5
|
5739
|
# coding: utf-8
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import (
clean_podcast_url,
float_or_none,
int_or_none,
strip_or_none,
try_get,
unified_strdate,
)
class SpotifyBaseIE(InfoExtractor):
_ACCESS_TOKEN = None
_OPERATION_HASHES = {
'Episode': '8276d4423d709ae9b68ec1b74cc047ba0f7479059a37820be730f125189ac2bf',
'MinimalShow': '13ee079672fad3f858ea45a55eb109553b4fb0969ed793185b2e34cbb6ee7cc0',
'ShowEpisodes': 'e0e5ce27bd7748d2c59b4d44ba245a8992a05be75d6fabc3b20753fc8857444d',
}
_VALID_URL_TEMPL = r'https?://open\.spotify\.com/%s/(?P<id>[^/?&#]+)'
def _real_initialize(self):
self._ACCESS_TOKEN = self._download_json(
'https://open.spotify.com/get_access_token', None)['accessToken']
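    # Queries go through the web player's persisted GraphQL endpoint; each
    # operation is identified by the sha256 hash listed in _OPERATION_HASHES.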
def _call_api(self, operation, video_id, variables):
return self._download_json(
'https://api-partner.spotify.com/pathfinder/v1/query', video_id, query={
'operationName': 'query' + operation,
'variables': json.dumps(variables),
'extensions': json.dumps({
'persistedQuery': {
'sha256Hash': self._OPERATION_HASHES[operation],
},
})
}, headers={'authorization': 'Bearer ' + self._ACCESS_TOKEN})['data']
def _extract_episode(self, episode, series):
episode_id = episode['id']
title = episode['name'].strip()
formats = []
audio_preview = episode.get('audioPreview') or {}
audio_preview_url = audio_preview.get('url')
if audio_preview_url:
f = {
'url': audio_preview_url.replace('://p.scdn.co/mp3-preview/', '://anon-podcast.scdn.co/'),
'vcodec': 'none',
}
audio_preview_format = audio_preview.get('format')
if audio_preview_format:
f['format_id'] = audio_preview_format
mobj = re.match(r'([0-9A-Z]{3})_(?:[A-Z]+_)?(\d+)', audio_preview_format)
if mobj:
f.update({
'abr': int(mobj.group(2)),
'ext': mobj.group(1).lower(),
})
formats.append(f)
for item in (try_get(episode, lambda x: x['audio']['items']) or []):
item_url = item.get('url')
if not (item_url and item.get('externallyHosted')):
continue
formats.append({
'url': clean_podcast_url(item_url),
'vcodec': 'none',
})
thumbnails = []
for source in (try_get(episode, lambda x: x['coverArt']['sources']) or []):
source_url = source.get('url')
if not source_url:
continue
thumbnails.append({
'url': source_url,
'width': int_or_none(source.get('width')),
'height': int_or_none(source.get('height')),
})
return {
'id': episode_id,
'title': title,
'formats': formats,
'thumbnails': thumbnails,
'description': strip_or_none(episode.get('description')),
'duration': float_or_none(try_get(
episode, lambda x: x['duration']['totalMilliseconds']), 1000),
'release_date': unified_strdate(try_get(
episode, lambda x: x['releaseDate']['isoString'])),
'series': series,
}
class SpotifyIE(SpotifyBaseIE):
IE_NAME = 'spotify'
_VALID_URL = SpotifyBaseIE._VALID_URL_TEMPL % 'episode'
_TEST = {
'url': 'https://open.spotify.com/episode/4Z7GAJ50bgctf6uclHlWKo',
'md5': '74010a1e3fa4d9e1ab3aa7ad14e42d3b',
'info_dict': {
'id': '4Z7GAJ50bgctf6uclHlWKo',
'ext': 'mp3',
'title': 'From the archive: Why time management is ruining our lives',
'description': 'md5:b120d9c4ff4135b42aa9b6d9cde86935',
'duration': 2083.605,
'release_date': '20201217',
'series': "The Guardian's Audio Long Reads",
}
}
def _real_extract(self, url):
episode_id = self._match_id(url)
episode = self._call_api('Episode', episode_id, {
'uri': 'spotify:episode:' + episode_id
})['episode']
return self._extract_episode(
episode, try_get(episode, lambda x: x['podcast']['name']))
class SpotifyShowIE(SpotifyBaseIE):
IE_NAME = 'spotify:show'
_VALID_URL = SpotifyBaseIE._VALID_URL_TEMPL % 'show'
_TEST = {
'url': 'https://open.spotify.com/show/4PM9Ke6l66IRNpottHKV9M',
'info_dict': {
'id': '4PM9Ke6l66IRNpottHKV9M',
'title': 'The Story from the Guardian',
'description': 'The Story podcast is dedicated to our finest audio documentaries, investigations and long form stories',
},
'playlist_mincount': 36,
}
def _real_extract(self, url):
show_id = self._match_id(url)
podcast = self._call_api('ShowEpisodes', show_id, {
'limit': 1000000000,
'offset': 0,
'uri': 'spotify:show:' + show_id,
})['podcast']
podcast_name = podcast.get('name')
entries = []
for item in (try_get(podcast, lambda x: x['episodes']['items']) or []):
episode = item.get('episode')
if not episode:
continue
entries.append(self._extract_episode(episode, podcast_name))
return self.playlist_result(
entries, show_id, podcast_name, podcast.get('description'))
|
unlicense
|
95a55676dd74bbd27d6cbebbf4bad5fb
| 35.788462
| 132
| 0.542081
| 3.516544
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/inc.py
|
24
|
2299
|
from __future__ import unicode_literals
from .common import InfoExtractor
from .kaltura import KalturaIE
class IncIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?inc\.com/(?:[^/]+/)+(?P<id>[^.]+).html'
_TESTS = [{
'url': 'http://www.inc.com/tip-sheet/bill-gates-says-these-5-books-will-make-you-smarter.html',
'md5': '7416739c9c16438c09fa35619d6ba5cb',
'info_dict': {
'id': '1_wqig47aq',
'ext': 'mov',
'title': 'Bill Gates Says These 5 Books Will Make You Smarter',
'description': 'md5:bea7ff6cce100886fc1995acb743237e',
'timestamp': 1474414430,
'upload_date': '20160920',
'uploader_id': 'video@inc.com',
},
'params': {
'skip_download': True,
},
}, {
# div with id=kaltura_player_1_kqs38cgm
'url': 'https://www.inc.com/oscar-raymundo/richard-branson-young-entrepeneurs.html',
'info_dict': {
'id': '1_kqs38cgm',
'ext': 'mp4',
'title': 'Branson: "In the end, you have to say, Screw it. Just do it."',
'description': 'md5:21b832d034f9af5191ca5959da5e9cb6',
'timestamp': 1364403232,
'upload_date': '20130327',
'uploader_id': 'incdigital@inc.com',
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.inc.com/video/david-whitford/founders-forum-tripadvisor-steve-kaufer-most-enjoyable-moment-for-entrepreneur.html',
'only_matching': True,
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
partner_id = self._search_regex(
r'var\s+_?bizo_data_partner_id\s*=\s*["\'](\d+)', webpage,
'partner id', default='1034971')
kaltura_id = self._search_regex(
r'id=(["\'])kaltura_player_(?P<id>.+?)\1', webpage, 'kaltura id',
default=None, group='id') or self._parse_json(self._search_regex(
r'pageInfo\.videos\s*=\s*\[(.+)\];', webpage, 'kaltura id'),
display_id)['vid_kaltura_id']
return self.url_result(
'kaltura:%s:%s' % (partner_id, kaltura_id), KalturaIE.ie_key())
|
unlicense
|
a90e74a7b8d5c2a4623c150d172c1126
| 37.966102
| 141
| 0.544585
| 3.036988
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/ctvnews.py
|
24
|
2276
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import orderedSet
class CTVNewsIE(InfoExtractor):
_VALID_URL = r'https?://(?:.+?\.)?ctvnews\.ca/(?:video\?(?:clip|playlist|bin)Id=|.*?)(?P<id>[0-9.]+)'
_TESTS = [{
'url': 'http://www.ctvnews.ca/video?clipId=901995',
'md5': '9b8624ba66351a23e0b6e1391971f9af',
'info_dict': {
'id': '901995',
'ext': 'flv',
'title': 'Extended: \'That person cannot be me\' Johnson says',
'description': 'md5:958dd3b4f5bbbf0ed4d045c790d89285',
'timestamp': 1467286284,
'upload_date': '20160630',
}
}, {
'url': 'http://www.ctvnews.ca/video?playlistId=1.2966224',
'info_dict':
{
'id': '1.2966224',
},
'playlist_mincount': 19,
}, {
'url': 'http://www.ctvnews.ca/video?binId=1.2876780',
'info_dict':
{
'id': '1.2876780',
},
'playlist_mincount': 100,
}, {
'url': 'http://www.ctvnews.ca/1.810401',
'only_matching': True,
}, {
'url': 'http://www.ctvnews.ca/canadiens-send-p-k-subban-to-nashville-in-blockbuster-trade-1.2967231',
'only_matching': True,
}, {
'url': 'http://vancouverisland.ctvnews.ca/video?clipId=761241',
'only_matching': True,
}]
def _real_extract(self, url):
page_id = self._match_id(url)
def ninecninemedia_url_result(clip_id):
return {
'_type': 'url_transparent',
'id': clip_id,
'url': '9c9media:ctvnews_web:%s' % clip_id,
'ie_key': 'NineCNineMedia',
}
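        # Purely numeric ids reference a single clip; dotted ids (e.g. 1.2966224)
        # are playlist/bin pages that must be scraped for their clip ids.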
if page_id.isdigit():
return ninecninemedia_url_result(page_id)
else:
webpage = self._download_webpage('http://www.ctvnews.ca/%s' % page_id, page_id, query={
'ot': 'example.AjaxPageLayout.ot',
'maxItemsPerPage': 1000000,
})
entries = [ninecninemedia_url_result(clip_id) for clip_id in orderedSet(
re.findall(r'clip\.id\s*=\s*(\d+);', webpage))]
return self.playlist_result(entries, page_id)
|
unlicense
|
f59ff8f3b7d39d780a956a65c23dd35a
| 32.470588
| 109
| 0.522408
| 3.13931
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/soundgasm.py
|
30
|
2414
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class SoundgasmIE(InfoExtractor):
IE_NAME = 'soundgasm'
_VALID_URL = r'https?://(?:www\.)?soundgasm\.net/u/(?P<user>[0-9a-zA-Z_-]+)/(?P<display_id>[0-9a-zA-Z_-]+)'
_TEST = {
'url': 'http://soundgasm.net/u/ytdl/Piano-sample',
'md5': '010082a2c802c5275bb00030743e75ad',
'info_dict': {
'id': '88abd86ea000cafe98f96321b23cc1206cbcbcc9',
'ext': 'm4a',
'title': 'Piano sample',
'description': 'Royalty Free Sample Music',
'uploader': 'ytdl',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('display_id')
webpage = self._download_webpage(url, display_id)
audio_url = self._html_search_regex(
r'(?s)m4a\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage,
'audio URL', group='url')
title = self._search_regex(
r'<div[^>]+\bclass=["\']jp-title[^>]+>([^<]+)',
webpage, 'title', default=display_id)
description = self._html_search_regex(
(r'(?s)<div[^>]+\bclass=["\']jp-description[^>]+>(.+?)</div>',
r'(?s)<li>Description:\s(.*?)<\/li>'),
webpage, 'description', fatal=False)
audio_id = self._search_regex(
r'/([^/]+)\.m4a', audio_url, 'audio id', default=display_id)
return {
'id': audio_id,
'display_id': display_id,
'url': audio_url,
'vcodec': 'none',
'title': title,
'description': description,
'uploader': mobj.group('user'),
}
class SoundgasmProfileIE(InfoExtractor):
IE_NAME = 'soundgasm:profile'
_VALID_URL = r'https?://(?:www\.)?soundgasm\.net/u/(?P<id>[^/]+)/?(?:\#.*)?$'
_TEST = {
'url': 'http://soundgasm.net/u/ytdl',
'info_dict': {
'id': 'ytdl',
},
'playlist_count': 1,
}
def _real_extract(self, url):
profile_id = self._match_id(url)
webpage = self._download_webpage(url, profile_id)
entries = [
self.url_result(audio_url, 'Soundgasm')
for audio_url in re.findall(r'href="([^"]+/u/%s/[^"]+)' % profile_id, webpage)]
return self.playlist_result(entries, profile_id)
|
unlicense
|
8ccd9c0632d14da17ffeb86c62bda6f2
| 30.350649
| 111
| 0.508699
| 3.25776
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/egghead.py
|
3
|
4767
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
determine_ext,
int_or_none,
try_get,
unified_timestamp,
url_or_none,
)
class EggheadBaseIE(InfoExtractor):
def _call_api(self, path, video_id, resource, fatal=True):
return self._download_json(
'https://app.egghead.io/api/v1/' + path,
video_id, 'Downloading %s JSON' % resource, fatal=fatal)
class EggheadCourseIE(EggheadBaseIE):
IE_DESC = 'egghead.io course'
IE_NAME = 'egghead:course'
_VALID_URL = r'https://egghead\.io/courses/(?P<id>[^/?#&]+)'
_TEST = {
'url': 'https://egghead.io/courses/professor-frisby-introduces-composable-functional-javascript',
'playlist_count': 29,
'info_dict': {
'id': '72',
'title': 'Professor Frisby Introduces Composable Functional JavaScript',
'description': 're:(?s)^This course teaches the ubiquitous.*You\'ll start composing functionality before you know it.$',
},
}
def _real_extract(self, url):
playlist_id = self._match_id(url)
series_path = 'series/' + playlist_id
lessons = self._call_api(
series_path + '/lessons', playlist_id, 'course lessons')
entries = []
for lesson in lessons:
lesson_url = url_or_none(lesson.get('http_url'))
if not lesson_url:
continue
lesson_id = lesson.get('id')
if lesson_id:
lesson_id = compat_str(lesson_id)
entries.append(self.url_result(
lesson_url, ie=EggheadLessonIE.ie_key(), video_id=lesson_id))
course = self._call_api(
series_path, playlist_id, 'course', False) or {}
playlist_id = course.get('id')
if playlist_id:
playlist_id = compat_str(playlist_id)
return self.playlist_result(
entries, playlist_id, course.get('title'),
course.get('description'))
class EggheadLessonIE(EggheadBaseIE):
IE_DESC = 'egghead.io lesson'
IE_NAME = 'egghead:lesson'
_VALID_URL = r'https://egghead\.io/(?:api/v1/)?lessons/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://egghead.io/lessons/javascript-linear-data-flow-with-container-style-types-box',
'info_dict': {
'id': '1196',
'display_id': 'javascript-linear-data-flow-with-container-style-types-box',
'ext': 'mp4',
'title': 'Create linear data flow with container style types (Box)',
'description': 'md5:9aa2cdb6f9878ed4c39ec09e85a8150e',
'thumbnail': r're:^https?:.*\.jpg$',
'timestamp': 1481296768,
'upload_date': '20161209',
'duration': 304,
'view_count': 0,
'tags': 'count:2',
},
'params': {
'skip_download': True,
'format': 'bestvideo',
},
}, {
'url': 'https://egghead.io/api/v1/lessons/react-add-redux-to-a-react-application',
'only_matching': True,
}]
def _real_extract(self, url):
display_id = self._match_id(url)
lesson = self._call_api(
'lessons/' + display_id, display_id, 'lesson')
lesson_id = compat_str(lesson['id'])
title = lesson['title']
formats = []
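        # media_urls maps delivery method to URL; choose the parser by extension
        # (HLS, DASH or a plain progressive file).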
for _, format_url in lesson['media_urls'].items():
format_url = url_or_none(format_url)
if not format_url:
continue
ext = determine_ext(format_url)
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
format_url, lesson_id, 'mp4', entry_protocol='m3u8',
m3u8_id='hls', fatal=False))
elif ext == 'mpd':
formats.extend(self._extract_mpd_formats(
format_url, lesson_id, mpd_id='dash', fatal=False))
else:
formats.append({
'url': format_url,
})
self._sort_formats(formats)
return {
'id': lesson_id,
'display_id': display_id,
'title': title,
'description': lesson.get('summary'),
'thumbnail': lesson.get('thumb_nail'),
'timestamp': unified_timestamp(lesson.get('published_at')),
'duration': int_or_none(lesson.get('duration')),
'view_count': int_or_none(lesson.get('plays_count')),
'tags': try_get(lesson, lambda x: x['tag_list'], list),
'series': try_get(
lesson, lambda x: x['series']['title'], compat_str),
'formats': formats,
}
|
unlicense
|
15ee99f60e99ce12eb17593c6d2930a7
| 34.574627
| 132
| 0.538913
| 3.523282
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/rtlnl.py
|
8
|
5896
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_duration,
)
class RtlNlIE(InfoExtractor):
IE_NAME = 'rtl.nl'
IE_DESC = 'rtl.nl and rtlxl.nl'
_VALID_URL = r'''(?x)
https?://(?:(?:www|static)\.)?
(?:
rtlxl\.nl/(?:[^\#]*\#!|programma)/[^/]+/|
rtl\.nl/(?:(?:system/videoplayer/(?:[^/]+/)+(?:video_)?embed\.html|embed)\b.+?\buuid=|video/)|
embed\.rtl\.nl/\#uuid=
)
(?P<id>[0-9a-f-]+)'''
_TESTS = [{
# new URL schema
'url': 'https://www.rtlxl.nl/programma/rtl-nieuws/0bd1384d-d970-3086-98bb-5c104e10c26f',
'md5': '490428f1187b60d714f34e1f2e3af0b6',
'info_dict': {
'id': '0bd1384d-d970-3086-98bb-5c104e10c26f',
'ext': 'mp4',
'title': 'RTL Nieuws',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'timestamp': 1593293400,
'upload_date': '20200627',
'duration': 661.08,
},
}, {
# old URL schema
'url': 'http://www.rtlxl.nl/#!/rtl-nieuws-132237/82b1aad1-4a14-3d7b-b554-b0aed1b2c416',
'md5': '473d1946c1fdd050b2c0161a4b13c373',
'info_dict': {
'id': '82b1aad1-4a14-3d7b-b554-b0aed1b2c416',
'ext': 'mp4',
'title': 'RTL Nieuws',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'timestamp': 1461951000,
'upload_date': '20160429',
'duration': 1167.96,
},
'skip': '404',
}, {
# best format available a3t
'url': 'http://www.rtl.nl/system/videoplayer/derden/rtlnieuws/video_embed.html#uuid=84ae5571-ac25-4225-ae0c-ef8d9efb2aed/autoplay=false',
'md5': 'dea7474214af1271d91ef332fb8be7ea',
'info_dict': {
'id': '84ae5571-ac25-4225-ae0c-ef8d9efb2aed',
'ext': 'mp4',
'timestamp': 1424039400,
'title': 'RTL Nieuws - Nieuwe beelden Kopenhagen: chaos direct na aanslag',
'thumbnail': r're:^https?://screenshots\.rtl\.nl/(?:[^/]+/)*sz=[0-9]+x[0-9]+/uuid=84ae5571-ac25-4225-ae0c-ef8d9efb2aed$',
'upload_date': '20150215',
'description': 'Er zijn nieuwe beelden vrijgegeven die vlak na de aanslag in Kopenhagen zijn gemaakt. Op de video is goed te zien hoe omstanders zich bekommeren om één van de slachtoffers, terwijl de eerste agenten ter plaatse komen.',
}
}, {
# empty synopsis and missing episodes (see https://github.com/ytdl-org/youtube-dl/issues/6275)
# best format available nettv
'url': 'http://www.rtl.nl/system/videoplayer/derden/rtlnieuws/video_embed.html#uuid=f536aac0-1dc3-4314-920e-3bd1c5b3811a/autoplay=false',
'info_dict': {
'id': 'f536aac0-1dc3-4314-920e-3bd1c5b3811a',
'ext': 'mp4',
'title': 'RTL Nieuws - Meer beelden van overval juwelier',
'thumbnail': r're:^https?://screenshots\.rtl\.nl/(?:[^/]+/)*sz=[0-9]+x[0-9]+/uuid=f536aac0-1dc3-4314-920e-3bd1c5b3811a$',
'timestamp': 1437233400,
'upload_date': '20150718',
'duration': 30.474,
},
'params': {
'skip_download': True,
},
}, {
# encrypted m3u8 streams, georestricted
'url': 'http://www.rtlxl.nl/#!/afl-2-257632/52a74543-c504-4cde-8aa8-ec66fe8d68a7',
'only_matching': True,
}, {
'url': 'http://www.rtl.nl/system/videoplayer/derden/embed.html#!/uuid=bb0353b0-d6a4-1dad-90e9-18fe75b8d1f0',
'only_matching': True,
}, {
'url': 'http://rtlxl.nl/?_ga=1.204735956.572365465.1466978370#!/rtl-nieuws-132237/3c487912-023b-49ac-903e-2c5d79f8410f',
'only_matching': True,
}, {
'url': 'https://www.rtl.nl/video/c603c9c2-601d-4b5e-8175-64f1e942dc7d/',
'only_matching': True,
}, {
'url': 'https://static.rtl.nl/embed/?uuid=1a2970fc-5c0b-43ff-9fdc-927e39e6d1bc&autoplay=false&publicatiepunt=rtlnieuwsnl',
'only_matching': True,
}, {
# new embed URL schema
'url': 'https://embed.rtl.nl/#uuid=84ae5571-ac25-4225-ae0c-ef8d9efb2aed/autoplay=false',
'only_matching': True,
}]
def _real_extract(self, url):
uuid = self._match_id(url)
info = self._download_json(
'http://www.rtl.nl/system/s4m/vfd/version=2/uuid=%s/fmt=adaptive/' % uuid,
uuid)
material = info['material'][0]
title = info['abstracts'][0]['name']
subtitle = material.get('title')
if subtitle:
title += ' - %s' % subtitle
description = material.get('synopsis')
meta = info.get('meta', {})
videopath = material['videopath']
m3u8_url = meta.get('videohost', 'http://manifest.us.rtl.nl') + videopath
formats = self._extract_m3u8_formats(
m3u8_url, uuid, 'mp4', m3u8_id='hls', fatal=False)
self._sort_formats(formats)
thumbnails = []
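        # Thumbnail base URLs encode their dimensions in a "sz=WIDTHxHEIGHT"
        # path segment (e.g. "sz=1024x576"); parse it to expose width/height.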
        for p in ('poster_base_url', 'thumb_base_url'):
if not meta.get(p):
continue
thumbnails.append({
'url': self._proto_relative_url(meta[p] + uuid),
'width': int_or_none(self._search_regex(
r'/sz=([0-9]+)', meta[p], 'thumbnail width', fatal=False)),
'height': int_or_none(self._search_regex(
r'/sz=[0-9]+x([0-9]+)',
meta[p], 'thumbnail height', fatal=False))
})
return {
'id': uuid,
'title': title,
'formats': formats,
'timestamp': material['original_date'],
'description': description,
'duration': parse_duration(material.get('duration')),
'thumbnails': thumbnails,
}
|
unlicense
|
e649c63c75942534ca2aa43c25414b1e
| 39.369863
| 247
| 0.551748
| 2.875122
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/kusi.py
|
7
|
3118
|
# coding: utf-8
from __future__ import unicode_literals
import random
import re
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote_plus
from ..utils import (
int_or_none,
float_or_none,
timeconvert,
update_url_query,
xpath_text,
)
class KUSIIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?kusi\.com/(?P<path>story/.+|video\?clipId=(?P<clipId>\d+))'
_TESTS = [{
'url': 'http://www.kusi.com/story/32849881/turko-files-refused-to-help-it-aint-right',
'md5': '4e76ce8e53660ce9697d06c0ba6fc47d',
'info_dict': {
'id': '12689020',
'ext': 'mp4',
'title': "Turko Files: Refused to Help, It Ain't Right!",
'duration': 223.586,
'upload_date': '20160826',
'timestamp': 1472233118,
'thumbnail': r're:^https?://.*\.jpg$'
},
}, {
'url': 'http://kusi.com/video?clipId=12203019',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
clip_id = mobj.group('clipId')
video_id = clip_id or mobj.group('path')
webpage = self._download_webpage(url, video_id)
if clip_id is None:
video_id = clip_id = self._html_search_regex(
r'"clipId"\s*,\s*"(\d+)"', webpage, 'clip id')
affiliate_id = self._search_regex(
r'affiliateId\s*:\s*\'([^\']+)\'', webpage, 'affiliate id')
# See __Packages/worldnow/model/GalleryModel.as of WNGallery.swf
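        # build.asp returns the clip metadata as an XML document; 'rnd' is a
        # random value, presumably to bypass intermediate caching.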
xml_url = update_url_query('http://www.kusi.com/build.asp', {
'buildtype': 'buildfeaturexmlrequest',
'featureType': 'Clip',
'featureid': clip_id,
'affiliateno': affiliate_id,
'clientgroupid': '1',
'rnd': int(round(random.random() * 1000000)),
})
doc = self._download_xml(xml_url, video_id)
video_title = xpath_text(doc, 'HEADLINE', fatal=True)
duration = float_or_none(xpath_text(doc, 'DURATION'), scale=1000)
description = xpath_text(doc, 'ABSTRACT')
thumbnail = xpath_text(doc, './THUMBNAILIMAGE/FILENAME')
creation_time = timeconvert(xpath_text(doc, 'rfc822creationdate'))
quality_options = doc.find('{http://search.yahoo.com/mrss/}group').findall('{http://search.yahoo.com/mrss/}content')
formats = []
for quality in quality_options:
formats.append({
'url': compat_urllib_parse_unquote_plus(quality.attrib['url']),
'height': int_or_none(quality.attrib.get('height')),
'width': int_or_none(quality.attrib.get('width')),
'vbr': float_or_none(quality.attrib.get('bitratebits'), scale=1000),
})
self._sort_formats(formats)
return {
'id': video_id,
'title': video_title,
'description': description,
'duration': duration,
'formats': formats,
'thumbnail': thumbnail,
'timestamp': creation_time,
}
|
unlicense
|
83f7d882477e36127e3b12fc26625de0
| 34.431818
| 124
| 0.556126
| 3.464444
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/instagram.py
|
1
|
16595
|
from __future__ import unicode_literals
import itertools
import hashlib
import json
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_HTTPError,
)
from ..utils import (
ExtractorError,
get_element_by_attribute,
int_or_none,
lowercase_escape,
std_headers,
try_get,
url_or_none,
)
class InstagramIE(InfoExtractor):
_VALID_URL = r'(?P<url>https?://(?:www\.)?instagram\.com/(?:p|tv|reel)/(?P<id>[^/?#&]+))'
_TESTS = [{
'url': 'https://instagram.com/p/aye83DjauH/?foo=bar#abc',
'md5': '0d2da106a9d2631273e192b372806516',
'info_dict': {
'id': 'aye83DjauH',
'ext': 'mp4',
'title': 'Video by naomipq',
'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
'thumbnail': r're:^https?://.*\.jpg',
'timestamp': 1371748545,
'upload_date': '20130620',
'uploader_id': 'naomipq',
'uploader': 'B E A U T Y F O R A S H E S',
'like_count': int,
'comment_count': int,
'comments': list,
},
}, {
# missing description
'url': 'https://www.instagram.com/p/BA-pQFBG8HZ/?taken-by=britneyspears',
'info_dict': {
'id': 'BA-pQFBG8HZ',
'ext': 'mp4',
'title': 'Video by britneyspears',
'thumbnail': r're:^https?://.*\.jpg',
'timestamp': 1453760977,
'upload_date': '20160125',
'uploader_id': 'britneyspears',
'uploader': 'Britney Spears',
'like_count': int,
'comment_count': int,
'comments': list,
},
'params': {
'skip_download': True,
},
}, {
# multi video post
'url': 'https://www.instagram.com/p/BQ0eAlwhDrw/',
'playlist': [{
'info_dict': {
'id': 'BQ0dSaohpPW',
'ext': 'mp4',
'title': 'Video 1',
},
}, {
'info_dict': {
'id': 'BQ0dTpOhuHT',
'ext': 'mp4',
'title': 'Video 2',
},
}, {
'info_dict': {
'id': 'BQ0dT7RBFeF',
'ext': 'mp4',
'title': 'Video 3',
},
}],
'info_dict': {
'id': 'BQ0eAlwhDrw',
'title': 'Post by instagram',
'description': 'md5:0f9203fc6a2ce4d228da5754bcf54957',
},
}, {
'url': 'https://instagram.com/p/-Cmh1cukG2/',
'only_matching': True,
}, {
'url': 'http://instagram.com/p/9o6LshA7zy/embed/',
'only_matching': True,
}, {
'url': 'https://www.instagram.com/tv/aye83DjauH/',
'only_matching': True,
}, {
'url': 'https://www.instagram.com/reel/CDUMkliABpa/',
'only_matching': True,
}]
@staticmethod
def _extract_embed_url(webpage):
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?instagram\.com/p/[^/]+/embed.*?)\1',
webpage)
if mobj:
return mobj.group('url')
blockquote_el = get_element_by_attribute(
'class', 'instagram-media', webpage)
if blockquote_el is None:
return
mobj = re.search(
r'<a[^>]+href=([\'"])(?P<link>[^\'"]+)\1', blockquote_el)
if mobj:
return mobj.group('link')
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
url = mobj.group('url')
webpage = self._download_webpage(url, video_id)
(media, video_url, description, thumbnail, timestamp, uploader,
uploader_id, like_count, comment_count, comments, height,
width) = [None] * 12
shared_data = self._parse_json(
self._search_regex(
r'window\._sharedData\s*=\s*({.+?});',
webpage, 'shared data', default='{}'),
video_id, fatal=False)
if shared_data:
media = try_get(
shared_data,
(lambda x: x['entry_data']['PostPage'][0]['graphql']['shortcode_media'],
lambda x: x['entry_data']['PostPage'][0]['media']),
dict)
# _sharedData.entry_data.PostPage is empty when authenticated (see
# https://github.com/ytdl-org/youtube-dl/pull/22880)
if not media:
additional_data = self._parse_json(
self._search_regex(
r'window\.__additionalDataLoaded\s*\(\s*[^,]+,\s*({.+?})\s*\)\s*;',
webpage, 'additional data', default='{}'),
video_id, fatal=False)
if additional_data:
media = try_get(
additional_data, lambda x: x['graphql']['shortcode_media'],
dict)
if media:
video_url = media.get('video_url')
height = int_or_none(media.get('dimensions', {}).get('height'))
width = int_or_none(media.get('dimensions', {}).get('width'))
description = try_get(
media, lambda x: x['edge_media_to_caption']['edges'][0]['node']['text'],
compat_str) or media.get('caption')
thumbnail = media.get('display_src') or media.get('display_url')
timestamp = int_or_none(media.get('taken_at_timestamp') or media.get('date'))
uploader = media.get('owner', {}).get('full_name')
uploader_id = media.get('owner', {}).get('username')
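            # Counts may be exposed either under the GraphQL
            # edge_media_<key> layout or the older plural '<kind>s' keys,
            # so try both.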
def get_count(keys, kind):
if not isinstance(keys, (list, tuple)):
keys = [keys]
for key in keys:
count = int_or_none(try_get(
media, (lambda x: x['edge_media_%s' % key]['count'],
lambda x: x['%ss' % kind]['count'])))
if count is not None:
return count
like_count = get_count('preview_like', 'like')
comment_count = get_count(
('preview_comment', 'to_comment', 'to_parent_comment'), 'comment')
comments = [{
'author': comment.get('user', {}).get('username'),
'author_id': comment.get('user', {}).get('id'),
'id': comment.get('id'),
'text': comment.get('text'),
'timestamp': int_or_none(comment.get('created_at')),
} for comment in media.get(
'comments', {}).get('nodes', []) if comment.get('text')]
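            # Multi-video ("sidecar") posts expose their clips under
            # edge_sidecar_to_children; return those as a playlist instead
            # of a single video.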
if not video_url:
edges = try_get(
media, lambda x: x['edge_sidecar_to_children']['edges'],
list) or []
if edges:
entries = []
for edge_num, edge in enumerate(edges, start=1):
node = try_get(edge, lambda x: x['node'], dict)
if not node:
continue
node_video_url = url_or_none(node.get('video_url'))
if not node_video_url:
continue
entries.append({
'id': node.get('shortcode') or node['id'],
'title': 'Video %d' % edge_num,
'url': node_video_url,
'thumbnail': node.get('display_url'),
'width': int_or_none(try_get(node, lambda x: x['dimensions']['width'])),
'height': int_or_none(try_get(node, lambda x: x['dimensions']['height'])),
'view_count': int_or_none(node.get('video_view_count')),
})
return self.playlist_result(
entries, video_id,
'Post by %s' % uploader_id if uploader_id else None,
description)
if not video_url:
video_url = self._og_search_video_url(webpage, secure=False)
formats = [{
'url': video_url,
'width': width,
'height': height,
}]
if not uploader_id:
uploader_id = self._search_regex(
r'"owner"\s*:\s*{\s*"username"\s*:\s*"(.+?)"',
webpage, 'uploader id', fatal=False)
if not description:
description = self._search_regex(
r'"caption"\s*:\s*"(.+?)"', webpage, 'description', default=None)
if description is not None:
description = lowercase_escape(description)
if not thumbnail:
thumbnail = self._og_search_thumbnail(webpage)
return {
'id': video_id,
'formats': formats,
'ext': 'mp4',
'title': 'Video by %s' % uploader_id,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'uploader_id': uploader_id,
'uploader': uploader,
'like_count': like_count,
'comment_count': comment_count,
'comments': comments,
}
class InstagramPlaylistIE(InfoExtractor):
# A superclass for handling any kind of query based on GraphQL which
# results in a playlist.
_gis_tmpl = None # used to cache GIS request type
def _parse_graphql(self, webpage, item_id):
# Reads a webpage and returns its GraphQL data.
return self._parse_json(
self._search_regex(
r'sharedData\s*=\s*({.+?})\s*;\s*[<\n]', webpage, 'data'),
item_id)
def _extract_graphql(self, data, url):
# Parses GraphQL queries containing videos and generates a playlist.
def get_count(suffix):
return int_or_none(try_get(
node, lambda x: x['edge_media_' + suffix]['count']))
uploader_id = self._match_id(url)
csrf_token = data['config']['csrf_token']
rhx_gis = data.get('rhx_gis') or '3c7ca9dcefcf966d11dacf1f151335e8'
cursor = ''
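        # Page through the GraphQL feed: each request asks for 12 items
        # starting after the end_cursor returned by the previous page.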
for page_num in itertools.count(1):
variables = {
'first': 12,
'after': cursor,
}
variables.update(self._query_vars_for(data))
variables = json.dumps(variables)
if self._gis_tmpl:
gis_tmpls = [self._gis_tmpl]
else:
gis_tmpls = [
'%s' % rhx_gis,
'',
'%s:%s' % (rhx_gis, csrf_token),
'%s:%s:%s' % (rhx_gis, csrf_token, std_headers['User-Agent']),
]
# try all of the ways to generate a GIS query, and not only use the
# first one that works, but cache it for future requests
for gis_tmpl in gis_tmpls:
try:
json_data = self._download_json(
'https://www.instagram.com/graphql/query/', uploader_id,
'Downloading JSON page %d' % page_num, headers={
'X-Requested-With': 'XMLHttpRequest',
'X-Instagram-GIS': hashlib.md5(
('%s:%s' % (gis_tmpl, variables)).encode('utf-8')).hexdigest(),
}, query={
'query_hash': self._QUERY_HASH,
'variables': variables,
})
media = self._parse_timeline_from(json_data)
self._gis_tmpl = gis_tmpl
break
except ExtractorError as e:
# if it's an error caused by a bad query, and there are
# more GIS templates to try, ignore it and keep trying
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
if gis_tmpl != gis_tmpls[-1]:
continue
raise
edges = media.get('edges')
if not edges or not isinstance(edges, list):
break
for edge in edges:
node = edge.get('node')
if not node or not isinstance(node, dict):
continue
if node.get('__typename') != 'GraphVideo' and node.get('is_video') is not True:
continue
video_id = node.get('shortcode')
if not video_id:
continue
info = self.url_result(
'https://instagram.com/p/%s/' % video_id,
ie=InstagramIE.ie_key(), video_id=video_id)
description = try_get(
node, lambda x: x['edge_media_to_caption']['edges'][0]['node']['text'],
compat_str)
thumbnail = node.get('thumbnail_src') or node.get('display_src')
timestamp = int_or_none(node.get('taken_at_timestamp'))
comment_count = get_count('to_comment')
like_count = get_count('preview_like')
view_count = int_or_none(node.get('video_view_count'))
info.update({
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'comment_count': comment_count,
'like_count': like_count,
'view_count': view_count,
})
yield info
page_info = media.get('page_info')
if not page_info or not isinstance(page_info, dict):
break
has_next_page = page_info.get('has_next_page')
if not has_next_page:
break
cursor = page_info.get('end_cursor')
if not cursor or not isinstance(cursor, compat_str):
break
def _real_extract(self, url):
user_or_tag = self._match_id(url)
webpage = self._download_webpage(url, user_or_tag)
data = self._parse_graphql(webpage, user_or_tag)
self._set_cookie('instagram.com', 'ig_pr', '1')
return self.playlist_result(
self._extract_graphql(data, url), user_or_tag, user_or_tag)
class InstagramUserIE(InstagramPlaylistIE):
_VALID_URL = r'https?://(?:www\.)?instagram\.com/(?P<id>[^/]{2,})/?(?:$|[?#])'
IE_DESC = 'Instagram user profile'
IE_NAME = 'instagram:user'
_TEST = {
'url': 'https://instagram.com/porsche',
'info_dict': {
'id': 'porsche',
'title': 'porsche',
},
'playlist_count': 5,
'params': {
'extract_flat': True,
'skip_download': True,
'playlistend': 5,
}
}
    _QUERY_HASH = '42323d64886122307be10013ad2dcc44'
@staticmethod
def _parse_timeline_from(data):
# extracts the media timeline data from a GraphQL result
return data['data']['user']['edge_owner_to_timeline_media']
@staticmethod
def _query_vars_for(data):
# returns a dictionary of variables to add to the timeline query based
# on the GraphQL of the original page
return {
'id': data['entry_data']['ProfilePage'][0]['graphql']['user']['id']
}
class InstagramTagIE(InstagramPlaylistIE):
_VALID_URL = r'https?://(?:www\.)?instagram\.com/explore/tags/(?P<id>[^/]+)'
IE_DESC = 'Instagram hashtag search'
IE_NAME = 'instagram:tag'
_TEST = {
'url': 'https://instagram.com/explore/tags/lolcats',
'info_dict': {
'id': 'lolcats',
'title': 'lolcats',
},
'playlist_count': 50,
'params': {
'extract_flat': True,
'skip_download': True,
'playlistend': 50,
}
}
    _QUERY_HASH = 'f92f56d47dc7a55b606908374b43a314'
@staticmethod
def _parse_timeline_from(data):
# extracts the media timeline data from a GraphQL result
return data['data']['hashtag']['edge_hashtag_to_media']
@staticmethod
def _query_vars_for(data):
# returns a dictionary of variables to add to the timeline query based
# on the GraphQL of the original page
return {
'tag_name':
data['entry_data']['TagPage'][0]['graphql']['hashtag']['name']
}
|
unlicense
|
a366d764901754567175a4557adca68c
| 35.959911
| 105
| 0.478759
| 3.963458
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/yourporn.py
|
12
|
2062
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
parse_duration,
urljoin,
)
class YourPornIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?sxyprn\.com/post/(?P<id>[^/?#&.]+)'
_TESTS = [{
'url': 'https://sxyprn.com/post/57ffcb2e1179b.html',
'md5': '6f8682b6464033d87acaa7a8ff0c092e',
'info_dict': {
'id': '57ffcb2e1179b',
'ext': 'mp4',
'title': 'md5:c9f43630bd968267672651ba905a7d35',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 165,
'age_limit': 18,
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://sxyprn.com/post/57ffcb2e1179b.html',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
parts = self._parse_json(
self._search_regex(
r'data-vnfo=(["\'])(?P<data>{.+?})\1', webpage, 'data info',
group='data'),
video_id)[video_id].split('/')
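        # De-obfuscate the video path: sum every digit found in the 7th and
        # 8th components, subtract that sum from the 6th component and append
        # '8' to the 2nd one before rejoining the parts into a URL.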
num = 0
for c in parts[6] + parts[7]:
if c.isnumeric():
num += int(c)
parts[5] = compat_str(int(parts[5]) - num)
parts[1] += '8'
video_url = urljoin(url, '/'.join(parts))
title = (self._search_regex(
r'<[^>]+\bclass=["\']PostEditTA[^>]+>([^<]+)', webpage, 'title',
default=None) or self._og_search_description(webpage)).strip()
thumbnail = self._og_search_thumbnail(webpage)
duration = parse_duration(self._search_regex(
r'duration\s*:\s*<[^>]+>([\d:]+)', webpage, 'duration',
default=None))
return {
'id': video_id,
'url': video_url,
'title': title,
'thumbnail': thumbnail,
'duration': duration,
'age_limit': 18,
'ext': 'mp4',
}
|
unlicense
|
921bb6e4b26e6b2c9b5bac242b6c95ea
| 29.776119
| 76
| 0.489816
| 3.425249
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/apa.py
|
19
|
3173
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
js_to_json,
url_or_none,
)
class APAIE(InfoExtractor):
_VALID_URL = r'https?://[^/]+\.apa\.at/embed/(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
_TESTS = [{
'url': 'http://uvp.apa.at/embed/293f6d17-692a-44e3-9fd5-7b178f3a1029',
'md5': '2b12292faeb0a7d930c778c7a5b4759b',
'info_dict': {
'id': 'jjv85FdZ',
'ext': 'mp4',
'title': '"Blau ist mysteriös": Die Blue Man Group im Interview',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 254,
'timestamp': 1519211149,
'upload_date': '20180221',
},
}, {
'url': 'https://uvp-apapublisher.sf.apa.at/embed/2f94e9e6-d945-4db2-9548-f9a41ebf7b78',
'only_matching': True,
}, {
'url': 'http://uvp-rma.sf.apa.at/embed/70404cca-2f47-4855-bbb8-20b1fae58f76',
'only_matching': True,
}, {
'url': 'http://uvp-kleinezeitung.sf.apa.at/embed/f1c44979-dba2-4ebf-b021-e4cf2cac3c81',
'only_matching': True,
}]
@staticmethod
def _extract_urls(webpage):
return [
mobj.group('url')
for mobj in re.finditer(
r'<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//[^/]+\.apa\.at/embed/[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}.*?)\1',
webpage)]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
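        # Some embeds wrap a JW Platform player; if a media id is present,
        # delegate extraction to the JWPlatform extractor.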
jwplatform_id = self._search_regex(
r'media[iI]d\s*:\s*["\'](?P<id>[a-zA-Z0-9]{8})', webpage,
'jwplatform id', default=None)
if jwplatform_id:
return self.url_result(
'jwplatform:' + jwplatform_id, ie='JWPlatform',
video_id=video_id)
sources = self._parse_json(
self._search_regex(
r'sources\s*=\s*(\[.+?\])\s*;', webpage, 'sources'),
video_id, transform_source=js_to_json)
formats = []
for source in sources:
if not isinstance(source, dict):
continue
source_url = url_or_none(source.get('file'))
if not source_url:
continue
ext = determine_ext(source_url)
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
source_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
else:
formats.append({
'url': source_url,
})
self._sort_formats(formats)
thumbnail = self._search_regex(
r'image\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage,
'thumbnail', fatal=False, group='url')
return {
'id': video_id,
'title': video_id,
'thumbnail': thumbnail,
'formats': formats,
}
|
unlicense
|
b3568d6dda3d7b350b901c505d9ee76d
| 32.744681
| 149
| 0.500315
| 3.094634
| false
| false
| false
| false
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/lenta.py
|
28
|
1682
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class LentaIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?lenta\.ru/[^/]+/\d+/\d+/\d+/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://lenta.ru/news/2018/03/22/savshenko_go/',
'info_dict': {
'id': '964400',
'ext': 'mp4',
'title': 'Надежду Савченко задержали',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 61,
'view_count': int,
},
'params': {
'skip_download': True,
},
}, {
# EaglePlatform iframe embed
'url': 'http://lenta.ru/news/2015/03/06/navalny/',
'info_dict': {
'id': '227304',
'ext': 'mp4',
'title': 'Навальный вышел на свободу',
'description': 'md5:d97861ac9ae77377f3f20eaf9d04b4f5',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 87,
'view_count': int,
'age_limit': 0,
},
'params': {
'skip_download': True,
},
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
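        # Articles embed an EaglePlatform player; delegate to the
        # EaglePlatform extractor when its numeric id is found, otherwise
        # fall back to the generic extractor.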
video_id = self._search_regex(
r'vid\s*:\s*["\']?(\d+)', webpage, 'eagleplatform id',
default=None)
if video_id:
return self.url_result(
'eagleplatform:lentaru.media.eagleplatform.com:%s' % video_id,
ie='EaglePlatform', video_id=video_id)
return self.url_result(url, ie='Generic')
|
unlicense
|
5c10c70a1b83265c2c0b37423ade22be
| 29.849057
| 83
| 0.48318
| 3.162476
| false
| false
| false
| false
|