# linkifier.py

import re
from urllib.parse import quote

from bleach import callbacks as linkify_callbacks
from bleach import html5lib_shim


#: List of default callbacks
DEFAULT_CALLBACKS = [linkify_callbacks.nofollow]

TLDS = """ac ad ae aero af ag ai al am an ao aq ar arpa as asia at au aw ax az
ba bb bd be bf bg bh bi biz bj bm bn bo br bs bt bv bw by bz ca cat
cc cd cf cg ch ci ck cl cm cn co com coop cr cu cv cx cy cz de dj dk
dm do dz ec edu ee eg er es et eu fi fj fk fm fo fr ga gb gd ge gf gg
gh gi gl gm gn gov gp gq gr gs gt gu gw gy hk hm hn hr ht hu id ie il
im in info int io iq ir is it je jm jo jobs jp ke kg kh ki km kn kp
kr kw ky kz la lb lc li lk lr ls lt lu lv ly ma mc md me mg mh mil mk
ml mm mn mo mobi mp mq mr ms mt mu museum mv mw mx my mz na name nc ne
net nf ng ni nl no np nr nu nz om org pa pe pf pg ph pk pl pm pn post
pr pro ps pt pw py qa re ro rs ru rw sa sb sc sd se sg sh si sj sk sl
sm sn so sr ss st su sv sx sy sz tc td tel tf tg th tj tk tl tm tn to
tp tr travel tt tv tw tz ua ug uk us uy uz va vc ve vg vi vn vu wf ws
xn xxx ye yt yu za zm zw""".split()

# Make sure that .com doesn't get matched by .co first
TLDS.reverse()


def build_url_re(tlds=TLDS, protocols=html5lib_shim.allowed_protocols):
    """Builds the url regex used by linkifier

    If you want a different set of tlds or allowed protocols, pass those in
    and stomp on the existing ``url_re``::

        from bleach import linkifier

        my_url_re = linkifier.build_url_re(my_tlds_list, my_protocols)

        linker = LinkifyFilter(url_re=my_url_re)

    """
    return re.compile(
        r"""\(*  # Match any opening parentheses.
        \b(?<![@.])(?:(?:{0}):/{{0,3}}(?:(?:\w+:)?\w+@)?)?  # http://
        ([\w-]+\.)+(?:{1})(?:\:[0-9]+)?(?!\.\w)\b  # xx.yy.tld(:##)?
        (?:[/?][^\s\{{\}}\|\\\^\[\]`<>"]*)?
            # /path/zz (excluding "unsafe" chars from RFC 1738,
            # except for # and ~, which happen in practice)
        """.format(
            "|".join(sorted(protocols)), "|".join(sorted(tlds))
        ),
        re.IGNORECASE | re.VERBOSE | re.UNICODE,
    )


URL_RE = build_url_re()

PROTO_RE = re.compile(r"^[\w-]+:/{0,3}", re.IGNORECASE)
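
# A minimal usage sketch (not part of this module): build a narrower URL regex
# and hand it to a ``Linker``. The tld and protocol values below are
# illustrative, not defaults.
#
#     from bleach.linkifier import Linker, build_url_re
#
#     my_url_re = build_url_re(tlds=["com", "org"], protocols=["https"])
#     linker = Linker(url_re=my_url_re)
#     html = linker.linkify("docs live at https://example.com/start")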


def build_email_re(tlds=TLDS):
    """Builds the email regex used by linkifier

    If you want a different set of tlds, pass those in and stomp on the
    existing ``email_re``::

        from bleach import linkifier

        my_email_re = linkifier.build_email_re(my_tlds_list)

        linker = LinkifyFilter(email_re=my_email_re)

    """
    # open and closing braces doubled below for format string
    return re.compile(
        r"""(?<!//)
        (([-!#$%&'*+/=?^_`{{}}|~0-9A-Z]+
            (\.[-!#$%&'*+/=?^_`{{}}|~0-9A-Z]+)*  # dot-atom
        |^"([\001-\010\013\014\016-\037!#-\[\]-\177]
            |\\[\001-\011\013\014\016-\177])*"  # quoted-string
        )@(?:[A-Z0-9](?:[A-Z0-9-]{{0,61}}[A-Z0-9])?\.)+(?:{0}))  # domain
        """.format(
            "|".join(tlds)
        ),
        re.IGNORECASE | re.MULTILINE | re.VERBOSE,
    )


EMAIL_RE = build_email_re()
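
# A similar sketch for email linkification (illustrative values): a custom
# email regex only takes effect when ``parse_email=True`` is also passed.
#
#     from bleach.linkifier import Linker, build_email_re
#
#     my_email_re = build_email_re(tlds=["com", "net"])
#     linker = Linker(parse_email=True, email_re=my_email_re)
#     html = linker.linkify("write to admin@example.com")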


class Linker:
    """Convert URL-like strings in an HTML fragment to links

    This class converts strings that look like URLs, domain names and email
    addresses in text that may be an HTML fragment to links, while preserving:

    1. links already in the string
    2. urls found in attributes
    3. email addresses

    linkify takes a best-effort approach and tries to recover from bad
    situations caused by malformed text.

    """

    def __init__(
        self,
        callbacks=DEFAULT_CALLBACKS,
        skip_tags=None,
        parse_email=False,
        url_re=URL_RE,
        email_re=EMAIL_RE,
        recognized_tags=html5lib_shim.HTML_TAGS,
    ):
        """Creates a Linker instance

        :arg list callbacks: list of callbacks to run when adjusting tag attributes;
            defaults to ``bleach.linkifier.DEFAULT_CALLBACKS``

        :arg list skip_tags: list of tags that you don't want to linkify the
            contents of; for example, you could set this to ``['pre']`` to skip
            linkifying contents of ``pre`` tags

        :arg bool parse_email: whether or not to linkify email addresses

        :arg url_re: url matching regex

        :arg email_re: email matching regex

        :arg list recognized_tags: the list of tags that linkify knows about;
            everything else gets escaped

        :returns: linkified text as unicode

        """
        self.callbacks = callbacks
        self.skip_tags = skip_tags
        self.parse_email = parse_email
        self.url_re = url_re
        self.email_re = email_re

        # Create a parser/tokenizer that allows all HTML tags and escapes
        # anything not in that list.
        self.parser = html5lib_shim.BleachHTMLParser(
            tags=recognized_tags,
            strip=False,
            consume_entities=True,
            namespaceHTMLElements=False,
        )
        self.walker = html5lib_shim.getTreeWalker("etree")
        self.serializer = html5lib_shim.BleachHTMLSerializer(
            quote_attr_values="always",
            omit_optional_tags=False,
            # linkify does not sanitize
            sanitize=False,
            # linkify preserves attr order
            alphabetical_attributes=False,
        )

    def linkify(self, text):
        """Linkify specified text

        :arg str text: the text to add links to

        :returns: linkified text as unicode

        :raises TypeError: if ``text`` is not a text type

        """
        if not isinstance(text, str):
            raise TypeError("argument must be of text type")

        if not text:
            return ""

        dom = self.parser.parseFragment(text)
        filtered = LinkifyFilter(
            source=self.walker(dom),
            callbacks=self.callbacks,
            skip_tags=self.skip_tags,
            parse_email=self.parse_email,
            url_re=self.url_re,
            email_re=self.email_re,
        )
        return self.serializer.render(filtered)
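

# A quick sketch of the end-to-end flow above. The output markup shown is what
# the default nofollow callback is expected to produce; it is illustrative,
# not a verified value.
#
#     from bleach.linkifier import Linker
#
#     linker = Linker(skip_tags=["pre"])
#     linker.linkify("visit example.com for more")
#     # expected:
#     # 'visit <a href="http://example.com" rel="nofollow">example.com</a> for more'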


class LinkifyFilter(html5lib_shim.Filter):
    """html5lib filter that linkifies text

    This will do the following:

    * convert email addresses into links
    * convert urls into links
    * edit existing links by running them through callbacks--the default is to
      add a ``rel="nofollow"``

    This filter can be used anywhere html5lib filters can be used.

    """

    def __init__(
        self,
        source,
        callbacks=DEFAULT_CALLBACKS,
        skip_tags=None,
        parse_email=False,
        url_re=URL_RE,
        email_re=EMAIL_RE,
    ):
        """Creates a LinkifyFilter instance

        :arg source: stream as an html5lib TreeWalker

        :arg list callbacks: list of callbacks to run when adjusting tag attributes;
            defaults to ``bleach.linkifier.DEFAULT_CALLBACKS``

        :arg list skip_tags: list of tags that you don't want to linkify the
            contents of; for example, you could set this to ``['pre']`` to skip
            linkifying contents of ``pre`` tags

        :arg bool parse_email: whether or not to linkify email addresses

        :arg url_re: url matching regex

        :arg email_re: email matching regex

        """
        super().__init__(source)

        self.callbacks = callbacks or []
        self.skip_tags = skip_tags or []
        self.parse_email = parse_email

        self.url_re = url_re
        self.email_re = email_re

    def apply_callbacks(self, attrs, is_new):
        """Given an attrs dict and an is_new bool, runs through callbacks

        Callbacks can return an adjusted attrs dict or ``None``. In the case of
        ``None``, we stop going through callbacks, return ``None``, and the
        link gets dropped.

        :arg dict attrs: map of ``(namespace, name)`` -> ``value``

        :arg bool is_new: whether or not this link was added by linkify

        :returns: adjusted attrs dict or ``None``

        """
        for cb in self.callbacks:
            attrs = cb(attrs, is_new)
            if attrs is None:
                return None
        return attrs
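
    # Sketch of the callback contract that ``apply_callbacks`` relies on. The
    # callback name and domain below are illustrative, not part of bleach:
    #
    #     def drop_example_links(attrs, new=False):
    #         href = attrs.get((None, "href"), "")
    #         if new and "example.com" in href:
    #             return None  # returning None drops the link entirely
    #         return attrs
    #
    #     linker = Linker(callbacks=[drop_example_links])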

    def extract_character_data(self, token_list):
        """Extracts and squashes character sequences in a token stream"""
        # FIXME(willkg): This is a terrible idea. What it does is drop all the
        # tags from the token list and merge the Characters and SpaceCharacters
        # tokens into a single text.
        #
        # So something like this::
        #
        #     "<span>" "<b>" "some text" "</b>" "</span>"
        #
        # gets converted to "some text".
        #
        # This gets used to figure out the ``_text`` fauxttribute value for
        # linkify callables.
        #
        # I'm not really sure how else to support that ``_text`` fauxttribute and
        # maintain some modicum of backwards compatibility with previous versions
        # of Bleach.

        out = []
        for token in token_list:
            token_type = token["type"]
            if token_type in ["Characters", "SpaceCharacters"]:
                out.append(token["data"])

        return "".join(out)

    def handle_email_addresses(self, src_iter):
        """Handle email addresses in character tokens"""
        for token in src_iter:
            if token["type"] == "Characters":
                text = token["data"]
                new_tokens = []
                end = 0

                # For each email address we find in the text
                for match in self.email_re.finditer(text):
                    if match.start() > end:
                        new_tokens.append(
                            {"type": "Characters", "data": text[end : match.start()]}
                        )

                    # URL-encode the "local-part" according to RFC6068
                    parts = match.group(0).split("@")
                    parts[0] = quote(parts[0])
                    address = "@".join(parts)

                    # Run attributes through the callbacks to see what we
                    # should do with this match
                    attrs = {
                        (None, "href"): "mailto:%s" % address,
                        "_text": match.group(0),
                    }
                    attrs = self.apply_callbacks(attrs, True)

                    if attrs is None:
                        # Just add the text--but not as a link
                        new_tokens.append(
                            {"type": "Characters", "data": match.group(0)}
                        )

                    else:
                        # Add an "a" tag for the new link
                        _text = attrs.pop("_text", "")
                        new_tokens.extend(
                            [
                                {"type": "StartTag", "name": "a", "data": attrs},
                                {"type": "Characters", "data": str(_text)},
                                {"type": "EndTag", "name": "a"},
                            ]
                        )
                    end = match.end()

                if new_tokens:
                    # Yield the adjusted set of tokens and then continue
                    # through the loop
                    if end < len(text):
                        new_tokens.append({"type": "Characters", "data": text[end:]})

                    yield from new_tokens

                    continue

            yield token
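
    # Roughly, with ``parse_email=True`` a character token like
    # ``{"type": "Characters", "data": "mail jo@example.com"}`` is re-emitted
    # as the following token shapes (plus whatever attributes the callbacks
    # add, e.g. ``rel="nofollow"`` by default). A sketch, not verified output:
    #
    #     {"type": "Characters", "data": "mail "}
    #     {"type": "StartTag", "name": "a",
    #      "data": {(None, "href"): "mailto:jo@example.com"}}
    #     {"type": "Characters", "data": "jo@example.com"}
    #     {"type": "EndTag", "name": "a"}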

    def strip_non_url_bits(self, fragment):
        """Strips non-url bits from the url

        This accounts for over-eager matching by the regex.

        """
        prefix = suffix = ""

        while fragment:
            # Try removing ( from the beginning and, if it's balanced, from the
            # end, too
            if fragment.startswith("("):
                prefix = prefix + "("
                fragment = fragment[1:]

                if fragment.endswith(")"):
                    suffix = ")" + suffix
                    fragment = fragment[:-1]
                continue

            # Now try removing extraneous things from the end. For example,
            # sometimes we pick up ) at the end of a url, but the url is in a
            # parenthesized phrase like:
            #
            #     "i looked at the site (at http://example.com)"
            if fragment.endswith(")") and "(" not in fragment:
                fragment = fragment[:-1]
                suffix = ")" + suffix
                continue

            # Handle commas
            if fragment.endswith(","):
                fragment = fragment[:-1]
                suffix = "," + suffix
                continue

            # Handle periods
            if fragment.endswith("."):
                fragment = fragment[:-1]
                suffix = "." + suffix
                continue

            # Nothing matched, so we're done
            break

        return fragment, prefix, suffix
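
    # Expected behavior of the loop above, e.g. for a parenthesized url and
    # for a trailing comma (illustrative values):
    #
    #     self.strip_non_url_bits("(http://example.com)")
    #     # -> ("http://example.com", "(", ")")
    #
    #     self.strip_non_url_bits("http://example.com),")
    #     # -> ("http://example.com", "", "),")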

    def handle_links(self, src_iter):
        """Handle links in character tokens"""
        in_a = False  # happens if parse_email=True and an email address was found
        for token in src_iter:
            if in_a:
                if token["type"] == "EndTag" and token["name"] == "a":
                    in_a = False
                yield token
                continue
            elif token["type"] == "StartTag" and token["name"] == "a":
                in_a = True
                yield token
                continue
            if token["type"] == "Characters":
                text = token["data"]
                new_tokens = []
                end = 0

                for match in self.url_re.finditer(text):
                    if match.start() > end:
                        new_tokens.append(
                            {"type": "Characters", "data": text[end : match.start()]}
                        )

                    url = match.group(0)
                    prefix = suffix = ""

                    # Sometimes we pick up too much in the url match, so look for
                    # bits we should drop and remove them from the match
                    url, prefix, suffix = self.strip_non_url_bits(url)

                    # If there's no protocol, add one
                    if PROTO_RE.search(url):
                        href = url
                    else:
                        href = "http://%s" % url

                    attrs = {(None, "href"): href, "_text": url}
                    attrs = self.apply_callbacks(attrs, True)

                    if attrs is None:
                        # Just add the text
                        new_tokens.append(
                            {"type": "Characters", "data": prefix + url + suffix}
                        )

                    else:
                        # Add the "a" tag!
                        if prefix:
                            new_tokens.append({"type": "Characters", "data": prefix})

                        _text = attrs.pop("_text", "")
                        new_tokens.extend(
                            [
                                {"type": "StartTag", "name": "a", "data": attrs},
                                {"type": "Characters", "data": str(_text)},
                                {"type": "EndTag", "name": "a"},
                            ]
                        )

                        if suffix:
                            new_tokens.append({"type": "Characters", "data": suffix})

                    end = match.end()

                if new_tokens:
                    # Yield the adjusted set of tokens and then continue
                    # through the loop
                    if end < len(text):
                        new_tokens.append({"type": "Characters", "data": text[end:]})

                    yield from new_tokens

                    continue

            yield token

    def handle_a_tag(self, token_buffer):
        """Handle the "a" tag

        This could adjust the link or drop it altogether depending on what the
        callbacks return.

        This yields the new set of tokens.

        """
        a_token = token_buffer[0]
        if a_token["data"]:
            attrs = a_token["data"]
        else:
            attrs = {}
        text = self.extract_character_data(token_buffer)
        attrs["_text"] = text

        attrs = self.apply_callbacks(attrs, False)

        if attrs is None:
            # We're dropping the "a" tag and everything else and replacing
            # it with character data. So emit that token.
            yield {"type": "Characters", "data": text}

        else:
            new_text = attrs.pop("_text", "")
            a_token["data"] = attrs

            if text == new_text:
                # The callbacks didn't change the text, so we yield the new "a"
                # token, then whatever else was there, then the end "a" token
                yield a_token
                yield from token_buffer[1:]

            else:
                # If the callbacks changed the text, then we're going to drop
                # all the tokens between the start and end "a" tags and replace
                # it with the new text
                yield a_token
                yield {"type": "Characters", "data": str(new_text)}
                yield token_buffer[-1]

    def __iter__(self):
        in_a = False
        in_skip_tag = None

        token_buffer = []

        for token in super().__iter__():
            if in_a:
                # Handle the case where we're in an "a" tag--we want to buffer tokens
                # until we hit an end "a" tag.
                if token["type"] == "EndTag" and token["name"] == "a":
                    # Add the end tag to the token buffer and then handle them
                    # and yield anything returned
                    token_buffer.append(token)
                    yield from self.handle_a_tag(token_buffer)

                    # Clear "a" related state and continue since we've yielded all
                    # the tokens we're going to yield
                    in_a = False
                    token_buffer = []
                else:
                    token_buffer.append(token)
                continue

            if token["type"] in ["StartTag", "EmptyTag"]:
                if token["name"] in self.skip_tags:
                    # Skip tags start a "special mode" where we don't linkify
                    # anything until the end tag.
                    in_skip_tag = token["name"]

                elif token["name"] == "a":
                    # The "a" tag is special--we switch to a slurp mode and
                    # slurp all the tokens until the end "a" tag and then
                    # figure out what to do with them there.
                    in_a = True
                    token_buffer.append(token)

                    # We buffer the start tag, so we don't want to yield it,
                    # yet
                    continue

            elif in_skip_tag and self.skip_tags:
                # NOTE(willkg): We put this clause here since in_a and
                # switching in and out of in_a takes precedence.
                if token["type"] == "EndTag" and token["name"] == in_skip_tag:
                    in_skip_tag = None

            elif not in_a and not in_skip_tag and token["type"] == "Characters":
                new_stream = iter([token])
                if self.parse_email:
                    new_stream = self.handle_email_addresses(new_stream)

                new_stream = self.handle_links(new_stream)

                for token in new_stream:
                    yield token

                # We've already yielded this token, so continue
                continue

            yield token
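

# LinkifyFilter can also be plugged into an existing html5lib pipeline. A
# common pattern (a sketch; see the bleach docs for the supported recipe) is
# to attach it to a ``bleach.sanitizer.Cleaner`` via ``functools.partial`` so
# that cleaning and linkifying happen in one parse:
#
#     from functools import partial
#
#     from bleach.sanitizer import Cleaner
#     from bleach.linkifier import LinkifyFilter
#
#     cleaner = Cleaner(filters=[partial(LinkifyFilter, skip_tags=["pre"])])
#     cleaner.clean("a link to example.com inside cleaned html")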