# mixin.py
# Copyright 2010-2023 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
#   this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Standard library imports.
import base64
import binascii
import copy
import html.entities
import re
import xml.sax.saxutils

# Package-local imports.
from .html import _cp1252
from .namespaces import _base, cc, dc, georss, itunes, mediarss, psc
from .sanitizer import _sanitize_html, _HTMLSanitizer
from .util import FeedParserDict
from .urls import _urljoin, make_safe_absolute_uri, resolve_relative_uris
class _FeedParserMixin(
    _base.Namespace,
    cc.Namespace,
    dc.Namespace,
    georss.Namespace,
    itunes.Namespace,
    mediarss.Namespace,
    psc.Namespace,
):
    # Maps known namespace URIs to the canonical prefix used when building
    # handler method names (e.g. the 'dc' prefix yields '_start_dc_title').
    # An empty-string prefix marks the core feed vocabularies whose elements
    # are dispatched without any prefix at all.
    namespaces = {
        '': '',
        'http://backend.userland.com/rss': '',
        'http://blogs.law.harvard.edu/tech/rss': '',
        'http://purl.org/rss/1.0/': '',
        'http://my.netscape.com/rdf/simple/0.9/': '',
        'http://example.com/newformat#': '',
        'http://example.com/necho': '',
        'http://purl.org/echo/': '',
        'uri/of/echo/namespace#': '',
        'http://purl.org/pie/': '',
        'http://purl.org/atom/ns#': '',
        'http://www.w3.org/2005/Atom': '',
        'http://purl.org/rss/1.0/modules/rss091#': '',
        'http://webns.net/mvcb/': 'admin',
        'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
        'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
        'http://media.tangent.org/rss/1.0/': 'audio',
        'http://backend.userland.com/blogChannelModule': 'blogChannel',
        'http://creativecommons.org/ns#license': 'cc',
        'http://web.resource.org/cc/': 'cc',
        'http://cyber.law.harvard.edu/rss/creativeCommonsRssModule.html': 'creativeCommons',
        'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
        'http://purl.org/rss/1.0/modules/company': 'co',
        'http://purl.org/rss/1.0/modules/content/': 'content',
        'http://my.theinfo.org/changed/1.0/rss/': 'cp',
        'http://purl.org/dc/elements/1.1/': 'dc',
        'http://purl.org/dc/terms/': 'dcterms',
        'http://purl.org/rss/1.0/modules/email/': 'email',
        'http://purl.org/rss/1.0/modules/event/': 'ev',
        'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
        'http://freshmeat.net/rss/fm/': 'fm',
        'http://xmlns.com/foaf/0.1/': 'foaf',
        'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
        'http://www.georss.org/georss': 'georss',
        'http://www.opengis.net/gml': 'gml',
        'http://postneo.com/icbm/': 'icbm',
        'http://purl.org/rss/1.0/modules/image/': 'image',
        'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
        'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
        'http://purl.org/rss/1.0/modules/link/': 'l',
        'http://search.yahoo.com/mrss': 'media',
        # Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace
        'http://search.yahoo.com/mrss/': 'media',
        'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
        'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
        'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
        'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
        'http://purl.org/rss/1.0/modules/reference/': 'ref',
        'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
        'http://purl.org/rss/1.0/modules/search/': 'search',
        'http://purl.org/rss/1.0/modules/slash/': 'slash',
        'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
        'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
        'http://hacks.benhammersley.com/rss/streaming/': 'str',
        'http://purl.org/rss/1.0/modules/subscription/': 'sub',
        'http://purl.org/rss/1.0/modules/syndication/': 'sy',
        'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf',
        'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
        'http://purl.org/rss/1.0/modules/threading/': 'thr',
        'http://purl.org/rss/1.0/modules/textinput/': 'ti',
        'http://madskills.com/public/xml/rss/module/trackback/': 'trackback',
        'http://wellformedweb.org/commentAPI/': 'wfw',
        'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
        'http://www.w3.org/1999/xhtml': 'xhtml',
        'http://www.w3.org/1999/xlink': 'xlink',
        'http://www.w3.org/XML/1998/namespace': 'xml',
        'http://podlove.org/simple-chapters': 'psc',
    }

    # Lower-cased mirror of `namespaces`, shared by all instances and
    # populated lazily the first time __init__ runs.
    _matchnamespaces = {}

    # Elements whose text value is itself a URI and may be relative;
    # pop() resolves these against the current base URI.
    can_be_relative_uri = {
        'comments',
        'docs',
        'href',
        'icon',
        'id',
        'link',
        'logo',
        'url',
        'wfw_comment',
        'wfw_commentrss',
    }

    # Elements whose embedded markup may contain relative URIs that
    # pop() rewrites to absolute form.
    can_contain_relative_uris = {
        'content',
        'copyright',
        'description',
        'info',
        'rights',
        'subtitle',
        'summary',
        'tagline',
        'title',
    }

    # Elements whose embedded markup is sanitized before being returned.
    can_contain_dangerous_markup = {
        'content',
        'copyright',
        'description',
        'info',
        'rights',
        'subtitle',
        'summary',
        'tagline',
        'title',
    }

    # Content types treated as HTML for URI resolution and sanitization.
    html_types = {
        'application/xhtml+xml',
        'text/html',
    }
  155. def __init__(self):
  156. if not self._matchnamespaces:
  157. for k, v in self.namespaces.items():
  158. self._matchnamespaces[k.lower()] = v
  159. self.feeddata = FeedParserDict() # feed-level data
  160. self.entries = [] # list of entry-level data
  161. self.version = '' # feed type/version, see SUPPORTED_VERSIONS
  162. self.namespaces_in_use = {} # dictionary of namespaces defined by the feed
  163. # the following are used internally to track state;
  164. # this is really out of control and should be refactored
  165. self.infeed = 0
  166. self.inentry = 0
  167. self.incontent = 0
  168. self.intextinput = 0
  169. self.inimage = 0
  170. self.inauthor = 0
  171. self.incontributor = 0
  172. self.inpublisher = 0
  173. self.insource = 0
  174. self.sourcedata = FeedParserDict()
  175. self.contentparams = FeedParserDict()
  176. self._summaryKey = None
  177. self.namespacemap = {}
  178. self.elementstack = []
  179. self.basestack = []
  180. self.langstack = []
  181. self.svgOK = 0
  182. self.title_depth = -1
  183. self.depth = 0
  184. self.hasContent = 0
  185. if self.lang:
  186. self.feeddata['language'] = self.lang.replace('_', '-')
  187. # A map of the following form:
  188. # {
  189. # object_that_value_is_set_on: {
  190. # property_name: depth_of_node_property_was_extracted_from,
  191. # other_property: depth_of_node_property_was_extracted_from,
  192. # },
  193. # }
  194. self.property_depth_map = {}
  195. super(_FeedParserMixin, self).__init__()
    def _normalize_attributes(self, kv):
        """Normalize a single ``(name, value)`` attribute pair.

        Abstract hook: concrete parser subclasses must override this.
        """
        raise NotImplementedError
    def unknown_starttag(self, tag, attrs):
        """Handle the start of any element without a dedicated handler.

        Tracks nesting depth, xml:base / xml:lang scoping, and namespace
        declarations, buffers inline XHTML content verbatim, and finally
        dispatches to a ``_start_<prefix><tag>`` method when one exists.
        """
        # increment depth counter
        self.depth += 1

        # normalize attrs
        attrs = [self._normalize_attributes(attr) for attr in attrs]

        # track xml:base and xml:lang
        attrs_d = dict(attrs)
        baseuri = attrs_d.get('xml:base', attrs_d.get('base')) or self.baseuri
        if isinstance(baseuri, bytes):
            baseuri = baseuri.decode(self.encoding, 'ignore')
        # ensure that self.baseuri is always an absolute URI that
        # uses a whitelisted URI scheme (e.g. not `javscript:`)
        if self.baseuri:
            self.baseuri = make_safe_absolute_uri(self.baseuri, baseuri) or self.baseuri
        else:
            self.baseuri = _urljoin(self.baseuri, baseuri)
        lang = attrs_d.get('xml:lang', attrs_d.get('lang'))
        if lang == '':
            # xml:lang could be explicitly set to '', we need to capture that
            lang = None
        elif lang is None:
            # if no xml:lang is specified, use parent lang
            lang = self.lang
        if lang:
            # Only the document root element sets the feed-level language.
            if tag in ('feed', 'rss', 'rdf:RDF'):
                self.feeddata['language'] = lang.replace('_', '-')
        self.lang = lang
        self.basestack.append(self.baseuri)
        self.langstack.append(lang)

        # track namespaces
        for prefix, uri in attrs:
            if prefix.startswith('xmlns:'):
                self.track_namespace(prefix[6:], uri)
            elif prefix == 'xmlns':
                self.track_namespace(None, uri)

        # track inline content
        if self.incontent and not self.contentparams.get('type', 'xml').endswith('xml'):
            if tag in ('xhtml:div', 'div'):
                return  # typepad does this 10/2007
            # element declared itself as escaped markup, but it isn't really
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            if tag.find(':') != -1:
                prefix, tag = tag.split(':', 1)
                namespace = self.namespaces_in_use.get(prefix, '')
                # Re-emit the namespace declaration for MathML/SVG islands so
                # the serialized fragment stays self-contained.
                if tag == 'math' and namespace == 'http://www.w3.org/1998/Math/MathML':
                    attrs.append(('xmlns', namespace))
                if tag == 'svg' and namespace == 'http://www.w3.org/2000/svg':
                    attrs.append(('xmlns', namespace))
            if tag == 'svg':
                self.svgOK += 1
            # Buffer the tag as literal markup; escape=0 keeps it verbatim.
            return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)

        # match namespaces
        if tag.find(':') != -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'

        # Special hack for better tracking of empty textinput/image elements in
        # illformed feeds.
        if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
            self.intextinput = 0
        if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
            self.inimage = 0

        # call special handler (if defined) or default handler
        methodname = '_start_' + prefix + suffix
        try:
            method = getattr(self, methodname)
            return method(attrs_d)
        except AttributeError:
            # Since there's no handler or something has gone wrong we
            # explicitly add the element and its attributes.
            unknown_tag = prefix + suffix
            if len(attrs_d) == 0:
                # No attributes so merge it into the enclosing dictionary
                return self.push(unknown_tag, 1)
            else:
                # Has attributes so create it in its own dictionary
                context = self._get_context()
                context[unknown_tag] = attrs_d
    def unknown_endtag(self, tag):
        """Handle the end of any element without a dedicated handler.

        Mirrors :meth:`unknown_starttag`: dispatches to ``_end_<prefix><tag>``
        when present, buffers inline XHTML close tags, and unwinds the
        xml:base / xml:lang scope stacks.
        """
        # match namespaces
        if tag.find(':') != -1:
            prefix, suffix = tag.split(':', 1)
        else:
            prefix, suffix = '', tag
        prefix = self.namespacemap.get(prefix, prefix)
        if prefix:
            prefix = prefix + '_'
        if suffix == 'svg' and self.svgOK:
            self.svgOK -= 1

        # call special handler (if defined) or default handler
        methodname = '_end_' + prefix + suffix
        try:
            # While inside an SVG island, skip special handlers so the markup
            # is buffered verbatim via the default pop() path.
            if self.svgOK:
                raise AttributeError()
            method = getattr(self, methodname)
            method()
        except AttributeError:
            self.pop(prefix + suffix)

        # track inline content
        if self.incontent and not self.contentparams.get('type', 'xml').endswith('xml'):
            # element declared itself as escaped markup, but it isn't really
            if tag in ('xhtml:div', 'div'):
                return  # typepad does this 10/2007
            self.contentparams['type'] = 'application/xhtml+xml'
        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
            tag = tag.split(':')[-1]
            self.handle_data('</%s>' % tag, escape=0)

        # track xml:base and xml:lang going out of scope
        if self.basestack:
            self.basestack.pop()
            if self.basestack and self.basestack[-1]:
                self.baseuri = self.basestack[-1]
        if self.langstack:
            self.langstack.pop()
            if self.langstack:  # and (self.langstack[-1] is not None):
                self.lang = self.langstack[-1]
        self.depth -= 1
    def handle_charref(self, ref):
        # Called for each character reference, e.g. for '&#160;', ref is '160'
        if not self.elementstack:
            return
        ref = ref.lower()
        # Markup-significant characters stay escaped as references; everything
        # else is decoded to UTF-8 bytes (pop() decodes pieces back to str).
        if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
            text = '&#%s;' % ref
        else:
            if ref[0] == 'x':
                c = int(ref[1:], 16)
            else:
                c = int(ref)
            text = chr(c).encode('utf-8')
        self.elementstack[-1][2].append(text)
    def handle_entityref(self, ref):
        # Called for each entity reference, e.g. for '&copy;', ref is 'copy'
        if not self.elementstack:
            return
        # Markup-significant entities are kept escaped.
        if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
            text = '&%s;' % ref
        elif ref in self.entities:
            # `self.entities` is supplied by a cooperating parser class --
            # presumably DTD-declared entities; TODO confirm.
            text = self.entities[ref]
            if text.startswith('&#') and text.endswith(';'):
                # NOTE(review): this re-enters handle_entityref with a full
                # '&#...;' reference rather than handle_charref -- looks
                # intentional per upstream, but worth confirming.
                return self.handle_entityref(text)
        else:
            try:
                html.entities.name2codepoint[ref]
            except KeyError:
                # Unknown entity: preserve it escaped rather than guessing.
                text = '&%s;' % ref
            else:
                text = chr(html.entities.name2codepoint[ref]).encode('utf-8')
        self.elementstack[-1][2].append(text)
  351. def handle_data(self, text, escape=1):
  352. # Called for each block of plain text, i.e. outside of any tag and
  353. # not containing any character or entity references
  354. if not self.elementstack:
  355. return
  356. if escape and self.contentparams.get('type') == 'application/xhtml+xml':
  357. text = xml.sax.saxutils.escape(text)
  358. self.elementstack[-1][2].append(text)
    def handle_comment(self, text):
        # Called for each comment, e.g. <!-- insert message here -->
        # Comments carry no feed data, so they are ignored.
        pass
    def handle_pi(self, text):
        # Called for each processing instruction, e.g. <?instruction>
        # Processing instructions carry no feed data, so they are ignored.
        pass
    def handle_decl(self, text):
        # Called for a document type declaration; intentionally ignored.
        pass
    def parse_declaration(self, i):
        # Override internal declaration handler to handle CDATA blocks.
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            if k == -1:
                # CDATA block began but didn't finish
                k = len(self.rawdata)
                return k
            # Escape the CDATA payload and buffer it as character data
            # (escape=0 here is handle_data's positional `escape` argument).
            self.handle_data(xml.sax.saxutils.escape(self.rawdata[i+9:k]), 0)
            # Skip past the closing ']]>' delimiter.
            return k+3
        else:
            k = self.rawdata.find('>', i)
            if k >= 0:
                return k+1
            else:
                # We have an incomplete CDATA block.
                return k
  384. @staticmethod
  385. def map_content_type(content_type):
  386. content_type = content_type.lower()
  387. if content_type == 'text' or content_type == 'plain':
  388. content_type = 'text/plain'
  389. elif content_type == 'html':
  390. content_type = 'text/html'
  391. elif content_type == 'xhtml':
  392. content_type = 'application/xhtml+xml'
  393. return content_type
    def track_namespace(self, prefix, uri):
        """Record a namespace declaration and infer the feed version from it.

        :param prefix: the declared prefix, or ``None`` for a default namespace
        :param uri: the namespace URI
        """
        loweruri = uri.lower()
        # The first recognizable namespace fixes the feed version.
        if not self.version:
            if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/'):
                self.version = 'rss090'
            elif loweruri == 'http://purl.org/rss/1.0/':
                self.version = 'rss10'
            elif loweruri == 'http://www.w3.org/2005/atom':
                self.version = 'atom10'
        if loweruri.find('backend.userland.com/rss') != -1:
            # match any backend.userland.com namespace
            uri = 'http://backend.userland.com/rss'
            loweruri = uri
        if loweruri in self._matchnamespaces:
            # Known namespace: remember the canonical prefix mapping.
            self.namespacemap[prefix] = self._matchnamespaces[loweruri]
            self.namespaces_in_use[self._matchnamespaces[loweruri]] = uri
        else:
            self.namespaces_in_use[prefix or ''] = uri
  412. def resolve_uri(self, uri):
  413. return _urljoin(self.baseuri or '', uri)
    @staticmethod
    def decode_entities(element, data):
        """Hook for subclasses to decode entities; the default is a no-op."""
        return data
  417. @staticmethod
  418. def strattrs(attrs):
  419. return ''.join(
  420. ' %s="%s"' % (t[0], xml.sax.saxutils.escape(t[1], {'"': '&quot;'}))
  421. for t in attrs
  422. )
  423. def push(self, element, expecting_text):
  424. self.elementstack.append([element, expecting_text, []])
    def pop(self, element, strip_whitespace=1):
        """Close *element* and post-process its buffered text.

        Runs the full output pipeline: base64 decoding, relative-URI
        resolution, entity decoding, content-type sniffing, sanitization,
        and encoding repair -- then stores the value on the current entry,
        feed, or source dictionary and returns it.
        """
        if not self.elementstack:
            return
        # Ignore mismatched end tags from ill-formed feeds.
        if self.elementstack[-1][0] != element:
            return

        element, expecting_text, pieces = self.elementstack.pop()

        # Ensure each piece is a str for Python 3
        # (handle_charref/handle_entityref may have appended UTF-8 bytes).
        for (i, v) in enumerate(pieces):
            if isinstance(v, bytes):
                pieces[i] = v.decode('utf-8')

        if self.version == 'atom10' and self.contentparams.get('type', 'text') == 'application/xhtml+xml':
            # remove enclosing child element, but only if it is a <div> and
            # only if all the remaining content is nested underneath it.
            # This means that the divs would be retained in the following:
            #     <div>foo</div><div>bar</div>
            while pieces and len(pieces) > 1 and not pieces[-1].strip():
                del pieces[-1]
            while pieces and len(pieces) > 1 and not pieces[0].strip():
                del pieces[0]
            if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1] == '</div>':
                depth = 0
                for piece in pieces[:-1]:
                    if piece.startswith('</'):
                        depth -= 1
                        if depth == 0:
                            break
                    elif piece.startswith('<') and not piece.endswith('/>'):
                        depth += 1
                else:
                    # The loop never balanced back to zero, so the leading
                    # <div> wraps everything: strip the wrapper pair.
                    pieces = pieces[1:-1]

        output = ''.join(pieces)
        if strip_whitespace:
            output = output.strip()
        # Elements pushed with expecting_text=0 only needed stack tracking.
        if not expecting_text:
            return output

        # decode base64 content
        if base64 and self.contentparams.get('base64', 0):
            try:
                output = base64.decodebytes(output.encode('utf8')).decode('utf8')
            except (binascii.Error, binascii.Incomplete, UnicodeDecodeError):
                # Malformed base64: keep the raw text rather than crashing.
                pass

        # resolve relative URIs
        if (element in self.can_be_relative_uri) and output:
            # do not resolve guid elements with isPermalink="false"
            if not element == 'id' or self.guidislink:
                output = self.resolve_uri(output)

        # decode entities within embedded markup
        if not self.contentparams.get('base64', 0):
            output = self.decode_entities(element, output)

        # some feed formats require consumers to guess
        # whether the content is html or plain text
        if not self.version.startswith('atom') and self.contentparams.get('type') == 'text/plain':
            if self.looks_like_html(output):
                self.contentparams['type'] = 'text/html'

        # remove temporary cruft from contentparams
        try:
            del self.contentparams['mode']
        except KeyError:
            pass
        try:
            del self.contentparams['base64']
        except KeyError:
            pass

        is_htmlish = self.map_content_type(self.contentparams.get('type', 'text/html')) in self.html_types
        # resolve relative URIs within embedded markup
        if is_htmlish and self.resolve_relative_uris:
            if element in self.can_contain_relative_uris:
                output = resolve_relative_uris(output, self.baseuri, self.encoding, self.contentparams.get('type', 'text/html'))

        # sanitize embedded markup
        if is_htmlish and self.sanitize_html:
            if element in self.can_contain_dangerous_markup:
                output = _sanitize_html(output, self.encoding, self.contentparams.get('type', 'text/html'))

        if self.encoding and isinstance(output, bytes):
            output = output.decode(self.encoding, 'ignore')

        # address common error where people take data that is already
        # utf-8, presume that it is iso-8859-1, and re-encode it.
        if self.encoding in ('utf-8', 'utf-8_INVALID_PYTHON_3') and not isinstance(output, bytes):
            try:
                output = output.encode('iso-8859-1').decode('utf-8')
            except (UnicodeEncodeError, UnicodeDecodeError):
                pass

        # map win-1252 extensions to the proper code points
        if not isinstance(output, bytes):
            output = output.translate(_cp1252)

        # categories/tags/keywords/whatever are handled in _end_category or
        # _end_tags or _end_itunes_keywords
        if element in ('category', 'tags', 'itunes_keywords'):
            return output

        # Keep only the shallowest (outermost) title already recorded.
        if element == 'title' and -1 < self.title_depth <= self.depth:
            return output

        # store output in appropriate place(s)
        if self.inentry and not self.insource:
            if element == 'content':
                self.entries[-1].setdefault(element, [])
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                self.entries[-1][element].append(contentparams)
            elif element == 'link':
                if not self.inimage:
                    # query variables in urls in link elements are improperly
                    # converted from `?a=1&b=2` to `?a=1&b;=2` as if they're
                    # unhandled character references. fix this special case.
                    output = output.replace('&amp;', '&')
                    output = re.sub("&([A-Za-z0-9_]+);", r"&\g<1>", output)
                    self.entries[-1][element] = output
                    if output:
                        self.entries[-1]['links'][-1]['href'] = output
            else:
                if element == 'description':
                    element = 'summary'
                # Prefer values extracted from shallower (outermost) nodes.
                old_value_depth = self.property_depth_map.setdefault(self.entries[-1], {}).get(element)
                if old_value_depth is None or self.depth <= old_value_depth:
                    self.property_depth_map[self.entries[-1]][element] = self.depth
                    self.entries[-1][element] = output
                if self.incontent:
                    contentparams = copy.deepcopy(self.contentparams)
                    contentparams['value'] = output
                    self.entries[-1][element + '_detail'] = contentparams
        elif self.infeed or self.insource:  # and (not self.intextinput) and (not self.inimage):
            context = self._get_context()
            if element == 'description':
                element = 'subtitle'
            context[element] = output
            if element == 'link':
                # fix query variables; see above for the explanation
                output = re.sub("&([A-Za-z0-9_]+);", r"&\g<1>", output)
                context[element] = output
                context['links'][-1]['href'] = output
            elif self.incontent:
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                context[element + '_detail'] = contentparams

        return output
  558. def push_content(self, tag, attrs_d, default_content_type, expecting_text):
  559. self.incontent += 1
  560. if self.lang:
  561. self.lang = self.lang.replace('_', '-')
  562. self.contentparams = FeedParserDict({
  563. 'type': self.map_content_type(attrs_d.get('type', default_content_type)),
  564. 'language': self.lang,
  565. 'base': self.baseuri})
  566. self.contentparams['base64'] = self._is_base64(attrs_d, self.contentparams)
  567. self.push(tag, expecting_text)
    def pop_content(self, tag):
        """Close a content-bearing element and return its accumulated value."""
        value = self.pop(tag)
        self.incontent -= 1
        # Reset metadata so enclosing elements don't inherit content params.
        self.contentparams.clear()
        return value
  573. # a number of elements in a number of RSS variants are nominally plain
  574. # text, but this is routinely ignored. This is an attempt to detect
  575. # the most common cases. As false positives often result in silent
  576. # data loss, this function errs on the conservative side.
  577. @staticmethod
  578. def looks_like_html(s):
  579. """
  580. :type s: str
  581. :rtype: bool
  582. """
  583. # must have a close tag or an entity reference to qualify
  584. if not (re.search(r'</(\w+)>', s) or re.search(r'&#?\w+;', s)):
  585. return False
  586. # all tags must be in a restricted subset of valid HTML tags
  587. if any((t for t in re.findall(r'</?(\w+)', s) if t.lower() not in _HTMLSanitizer.acceptable_elements)):
  588. return False
  589. # all entities must have been defined as valid HTML entities
  590. if any((e for e in re.findall(r'&(\w+);', s) if e not in html.entities.entitydefs)):
  591. return False
  592. return True
  593. def _map_to_standard_prefix(self, name):
  594. colonpos = name.find(':')
  595. if colonpos != -1:
  596. prefix = name[:colonpos]
  597. suffix = name[colonpos+1:]
  598. prefix = self.namespacemap.get(prefix, prefix)
  599. name = prefix + ':' + suffix
  600. return name
  601. def _get_attribute(self, attrs_d, name):
  602. return attrs_d.get(self._map_to_standard_prefix(name))
  603. def _is_base64(self, attrs_d, contentparams):
  604. if attrs_d.get('mode', '') == 'base64':
  605. return 1
  606. if self.contentparams['type'].startswith('text/'):
  607. return 0
  608. if self.contentparams['type'].endswith('+xml'):
  609. return 0
  610. if self.contentparams['type'].endswith('/xml'):
  611. return 0
  612. return 1
  613. @staticmethod
  614. def _enforce_href(attrs_d):
  615. href = attrs_d.get('url', attrs_d.get('uri', attrs_d.get('href', None)))
  616. if href:
  617. try:
  618. del attrs_d['url']
  619. except KeyError:
  620. pass
  621. try:
  622. del attrs_d['uri']
  623. except KeyError:
  624. pass
  625. attrs_d['href'] = href
  626. return attrs_d
  627. def _save(self, key, value, overwrite=False):
  628. context = self._get_context()
  629. if overwrite:
  630. context[key] = value
  631. else:
  632. context.setdefault(key, value)
  633. def _get_context(self):
  634. if self.insource:
  635. context = self.sourcedata
  636. elif self.inimage and 'image' in self.feeddata:
  637. context = self.feeddata['image']
  638. elif self.intextinput:
  639. context = self.feeddata['textinput']
  640. elif self.inentry:
  641. context = self.entries[-1]
  642. else:
  643. context = self.feeddata
  644. return context
    def _save_author(self, key, value, prefix='author'):
        """Record one field (name/email/href) of the current author."""
        context = self._get_context()
        context.setdefault(prefix + '_detail', FeedParserDict())
        context[prefix + '_detail'][key] = value
        # Recompute the combined display string after each detail update.
        self._sync_author_detail()
        context.setdefault('authors', [FeedParserDict()])
        context['authors'][-1][key] = value
  652. def _save_contributor(self, key, value):
  653. context = self._get_context()
  654. context.setdefault('contributors', [FeedParserDict()])
  655. context['contributors'][-1][key] = value
    def _sync_author_detail(self, key='author'):
        """Keep the combined author string and the detail dict in sync.

        If structured detail exists, rebuild the display string from it;
        otherwise try to split a free-form 'Name (email)' string back into
        name and email parts.
        """
        context = self._get_context()
        detail = context.get('%ss' % key, [FeedParserDict()])[-1]
        if detail:
            name = detail.get('name')
            email = detail.get('email')
            if name and email:
                context[key] = '%s (%s)' % (name, email)
            elif name:
                context[key] = name
            elif email:
                context[key] = email
        else:
            author, email = context.get(key), None
            if not author:
                return
            # Best-effort extraction of an email address from free-form text.
            emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
            if emailmatch:
                email = emailmatch.group(0)
                # probably a better way to do the following, but it passes
                # all the tests
                author = author.replace(email, '')
                author = author.replace('()', '')
                author = author.replace('<>', '')
                author = author.replace('&lt;&gt;', '')
                author = author.strip()
                if author and (author[0] == '('):
                    author = author[1:]
                if author and (author[-1] == ')'):
                    author = author[:-1]
                author = author.strip()
            if author or email:
                context.setdefault('%s_detail' % key, detail)
            if author:
                detail['name'] = author
            if email:
                detail['email'] = email
  693. def _add_tag(self, term, scheme, label):
  694. context = self._get_context()
  695. tags = context.setdefault('tags', [])
  696. if (not term) and (not scheme) and (not label):
  697. return
  698. value = FeedParserDict(term=term, scheme=scheme, label=label)
  699. if value not in tags:
  700. tags.append(value)
    def _start_tags(self, attrs_d):
        # This is a completely-made up element. Its semantics are determined
        # only by a single feed that precipitated bug report 392 on Google Code.
        # In short, this is junk code.
        self.push('tags', 1)
  706. def _end_tags(self):
  707. for term in self.pop('tags').split(','):
  708. self._add_tag(term.strip(), None, None)