import logging
from os import PathLike
from typing import Any, BinaryIO, List, Optional, Set

from .cd import (
    coherence_ratio,
    encoding_languages,
    mb_encoding_languages,
    merge_coherence_ratios,
)
from .constant import IANA_SUPPORTED, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE, TRACE
from .md import mess_ratio
from .models import CharsetMatch, CharsetMatches
from .utils import (
    any_specified_encoding,
    cut_sequence_chunks,
    iana_name,
    identify_sig_or_bom,
    is_cp_similar,
    is_multi_byte_encoding,
    should_strip_sig_or_bom,
)

# Will most likely be controversial
# logging.addLevelName(TRACE, "TRACE")
logger = logging.getLogger("charset_normalizer")
explain_handler = logging.StreamHandler()
explain_handler.setFormatter(
    logging.Formatter("%(asctime)s | %(levelname)s | %(message)s")
)


def from_bytes(
    sequences: bytes,
    steps: int = 5,
    chunk_size: int = 512,
    threshold: float = 0.2,
    cp_isolation: Optional[List[str]] = None,
    cp_exclusion: Optional[List[str]] = None,
    preemptive_behaviour: bool = True,
    explain: bool = False,
    language_threshold: float = 0.1,
) -> CharsetMatches:
  40. """
  41. Given a raw bytes sequence, return the best possibles charset usable to render str objects.
  42. If there is no results, it is a strong indicator that the source is binary/not text.
  43. By default, the process will extract 5 blocks of 512o each to assess the mess and coherence of a given sequence.
  44. And will give up a particular code page after 20% of measured mess. Those criteria are customizable at will.
  45. The preemptive behavior DOES NOT replace the traditional detection workflow, it prioritize a particular code page
  46. but never take it for granted. Can improve the performance.
  47. You may want to focus your attention to some code page or/and not others, use cp_isolation and cp_exclusion for that
  48. purpose.
  49. This function will strip the SIG in the payload/sequence every time except on UTF-16, UTF-32.
  50. By default the library does not setup any handler other than the NullHandler, if you choose to set the 'explain'
  51. toggle to True it will alter the logger configuration to add a StreamHandler that is suitable for debugging.
  52. Custom logging format and handler can be set manually.
  53. """
    if not isinstance(sequences, (bytearray, bytes)):
        raise TypeError(
            "Expected object of type bytes or bytearray, got: {0}".format(
                type(sequences)
            )
        )

    if explain:
        previous_logger_level: int = logger.level
        logger.addHandler(explain_handler)
        logger.setLevel(TRACE)

    length: int = len(sequences)

    if length == 0:
        logger.debug("Encoding detection on empty bytes, assuming utf_8 intention.")
        if explain:
            logger.removeHandler(explain_handler)
            logger.setLevel(previous_logger_level or logging.WARNING)
        return CharsetMatches([CharsetMatch(sequences, "utf_8", 0.0, False, [], "")])
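
    # Normalize any user-supplied code page hints to their IANA names before use.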
    if cp_isolation is not None:
        logger.log(
            TRACE,
            "cp_isolation is set. use this flag for debugging purpose. "
            "limited list of encoding allowed : %s.",
            ", ".join(cp_isolation),
        )
        cp_isolation = [iana_name(cp, False) for cp in cp_isolation]
    else:
        cp_isolation = []

    if cp_exclusion is not None:
        logger.log(
            TRACE,
            "cp_exclusion is set. use this flag for debugging purpose. "
            "limited list of encoding excluded : %s.",
            ", ".join(cp_exclusion),
        )
        cp_exclusion = [iana_name(cp, False) for cp in cp_exclusion]
    else:
        cp_exclusion = []

    if length <= (chunk_size * steps):
        logger.log(
            TRACE,
            "override steps (%i) and chunk_size (%i) as content does not fit (%i byte(s) given) parameters.",
            steps,
            chunk_size,
            length,
        )
        steps = 1
        chunk_size = length

    if steps > 1 and length / steps < chunk_size:
        chunk_size = int(length / steps)

    is_too_small_sequence: bool = len(sequences) < TOO_SMALL_SEQUENCE
    is_too_large_sequence: bool = len(sequences) >= TOO_BIG_SEQUENCE

    if is_too_small_sequence:
        logger.log(
            TRACE,
            "Trying to detect encoding from a tiny portion of ({}) byte(s).".format(
                length
            ),
        )
    elif is_too_large_sequence:
        logger.log(
            TRACE,
            "Using lazy str decoding because the payload is quite large, ({}) byte(s).".format(
                length
            ),
        )
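
    # Build the list of encodings worth trying first: a declared encoding found
    # in the payload (preemptive behaviour), then any BOM/SIG match, then ascii
    # and utf_8.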
    prioritized_encodings: List[str] = []

    specified_encoding: Optional[str] = (
        any_specified_encoding(sequences) if preemptive_behaviour else None
    )

    if specified_encoding is not None:
        prioritized_encodings.append(specified_encoding)
        logger.log(
            TRACE,
            "Detected declarative mark in sequence. Priority +1 given for %s.",
            specified_encoding,
        )

    tested: Set[str] = set()
    tested_but_hard_failure: List[str] = []
    tested_but_soft_failure: List[str] = []

    fallback_ascii: Optional[CharsetMatch] = None
    fallback_u8: Optional[CharsetMatch] = None
    fallback_specified: Optional[CharsetMatch] = None

    results: CharsetMatches = CharsetMatches()

    sig_encoding, sig_payload = identify_sig_or_bom(sequences)

    if sig_encoding is not None:
        prioritized_encodings.append(sig_encoding)
        logger.log(
            TRACE,
            "Detected a SIG or BOM mark on first %i byte(s). Priority +1 given for %s.",
            len(sig_payload),
            sig_encoding,
        )

    prioritized_encodings.append("ascii")

    if "utf_8" not in prioritized_encodings:
        prioritized_encodings.append("utf_8")
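
    # Main detection loop: probe the prioritized encodings first, then every
    # remaining IANA-supported code page.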
    for encoding_iana in prioritized_encodings + IANA_SUPPORTED:
        if cp_isolation and encoding_iana not in cp_isolation:
            continue

        if cp_exclusion and encoding_iana in cp_exclusion:
            continue

        if encoding_iana in tested:
            continue

        tested.add(encoding_iana)

        decoded_payload: Optional[str] = None
        bom_or_sig_available: bool = sig_encoding == encoding_iana
        strip_sig_or_bom: bool = bom_or_sig_available and should_strip_sig_or_bom(
            encoding_iana
        )

        if encoding_iana in {"utf_16", "utf_32"} and not bom_or_sig_available:
            logger.log(
                TRACE,
                "Encoding %s won't be tested as-is because it require a BOM. Will try some sub-encoder LE/BE.",
                encoding_iana,
            )
            continue
        if encoding_iana in {"utf_7"} and not bom_or_sig_available:
            logger.log(
                TRACE,
                "Encoding %s won't be tested as-is because detection is unreliable without BOM/SIG.",
                encoding_iana,
            )
            continue

        try:
            is_multi_byte_decoder: bool = is_multi_byte_encoding(encoding_iana)
        except (ModuleNotFoundError, ImportError):
            logger.log(
                TRACE,
                "Encoding %s does not provide an IncrementalDecoder",
                encoding_iana,
            )
            continue

        try:
            if is_too_large_sequence and is_multi_byte_decoder is False:
                str(
                    sequences[: int(50e4)]
                    if strip_sig_or_bom is False
                    else sequences[len(sig_payload) : int(50e4)],
                    encoding=encoding_iana,
                )
            else:
                decoded_payload = str(
                    sequences
                    if strip_sig_or_bom is False
                    else sequences[len(sig_payload) :],
                    encoding=encoding_iana,
                )
        except (UnicodeDecodeError, LookupError) as e:
            if not isinstance(e, LookupError):
                logger.log(
                    TRACE,
                    "Code page %s does not fit given bytes sequence at ALL. %s",
                    encoding_iana,
                    str(e),
                )
            tested_but_hard_failure.append(encoding_iana)
            continue

        similar_soft_failure_test: bool = False

        for encoding_soft_failed in tested_but_soft_failure:
            if is_cp_similar(encoding_iana, encoding_soft_failed):
                similar_soft_failure_test = True
                break

        if similar_soft_failure_test:
            logger.log(
                TRACE,
                "%s is deemed too similar to code page %s and was consider unsuited already. Continuing!",
                encoding_iana,
                encoding_soft_failed,
            )
            continue

        r_ = range(
            0 if not bom_or_sig_available else len(sig_payload),
            length,
            int(length / steps),
        )

        multi_byte_bonus: bool = (
            is_multi_byte_decoder
            and decoded_payload is not None
            and len(decoded_payload) < length
        )

        if multi_byte_bonus:
            logger.log(
                TRACE,
                "Code page %s is a multi byte encoding table and it appear that at least one character "
                "was encoded using n-bytes.",
                encoding_iana,
            )
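
        # Tolerate up to a quarter of the chunks failing the mess threshold
        # (never fewer than 2) before giving up on this code page.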
        max_chunk_gave_up: int = int(len(r_) / 4)

        max_chunk_gave_up = max(max_chunk_gave_up, 2)
        early_stop_count: int = 0
        lazy_str_hard_failure = False

        md_chunks: List[str] = []
        md_ratios = []

        try:
            for chunk in cut_sequence_chunks(
                sequences,
                encoding_iana,
                r_,
                chunk_size,
                bom_or_sig_available,
                strip_sig_or_bom,
                sig_payload,
                is_multi_byte_decoder,
                decoded_payload,
            ):
                md_chunks.append(chunk)

                md_ratios.append(
                    mess_ratio(
                        chunk,
                        threshold,
                        explain is True and 1 <= len(cp_isolation) <= 2,
                    )
                )

                if md_ratios[-1] >= threshold:
                    early_stop_count += 1

                if (early_stop_count >= max_chunk_gave_up) or (
                    bom_or_sig_available and strip_sig_or_bom is False
                ):
                    break
        except UnicodeDecodeError as e:  # Lazy str loading may have missed something there
            logger.log(
                TRACE,
                "LazyStr Loading: After MD chunk decode, code page %s does not fit given bytes sequence at ALL. %s",
                encoding_iana,
                str(e),
            )
            early_stop_count = max_chunk_gave_up
            lazy_str_hard_failure = True

        # We might want to check the sequence again with the whole content,
        # but only if the initial MD tests pass.
        if (
            not lazy_str_hard_failure
            and is_too_large_sequence
            and not is_multi_byte_decoder
        ):
            try:
                sequences[int(50e3) :].decode(encoding_iana, errors="strict")
            except UnicodeDecodeError as e:
                logger.log(
                    TRACE,
                    "LazyStr Loading: After final lookup, code page %s does not fit given bytes sequence at ALL. %s",
                    encoding_iana,
                    str(e),
                )
                tested_but_hard_failure.append(encoding_iana)
                continue
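
        # Average the per-chunk mess measurements into a single score for this
        # encoding.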
        mean_mess_ratio: float = sum(md_ratios) / len(md_ratios) if md_ratios else 0.0

        if mean_mess_ratio >= threshold or early_stop_count >= max_chunk_gave_up:
            tested_but_soft_failure.append(encoding_iana)
            logger.log(
                TRACE,
                "%s was excluded because of initial chaos probing. Gave up %i time(s). "
                "Computed mean chaos is %f %%.",
                encoding_iana,
                early_stop_count,
                round(mean_mess_ratio * 100, ndigits=3),
            )
            # Preparing those fallbacks in case we got nothing.
            if (
                encoding_iana in ["ascii", "utf_8", specified_encoding]
                and not lazy_str_hard_failure
            ):
                fallback_entry = CharsetMatch(
                    sequences, encoding_iana, threshold, False, [], decoded_payload
                )
                if encoding_iana == specified_encoding:
                    fallback_specified = fallback_entry
                elif encoding_iana == "ascii":
                    fallback_ascii = fallback_entry
                else:
                    fallback_u8 = fallback_entry
            continue

        logger.log(
            TRACE,
            "%s passed initial chaos probing. Mean measured chaos is %f %%",
            encoding_iana,
            round(mean_mess_ratio * 100, ndigits=3),
        )
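
        # The encoding survived the chaos check; now measure language coherence
        # against the languages this code page is known to target.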
        if not is_multi_byte_decoder:
            target_languages: List[str] = encoding_languages(encoding_iana)
        else:
            target_languages = mb_encoding_languages(encoding_iana)

        if target_languages:
            logger.log(
                TRACE,
                "{} should target any language(s) of {}".format(
                    encoding_iana, str(target_languages)
                ),
            )

        cd_ratios = []

        # We shall skip the CD when it's about ASCII.
        # Most of the time it's not relevant to run "language-detection" on it.
        if encoding_iana != "ascii":
            for chunk in md_chunks:
                chunk_languages = coherence_ratio(
                    chunk,
                    language_threshold,
                    ",".join(target_languages) if target_languages else None,
                )

                cd_ratios.append(chunk_languages)

        cd_ratios_merged = merge_coherence_ratios(cd_ratios)

        if cd_ratios_merged:
            logger.log(
                TRACE,
                "We detected language {} using {}".format(
                    cd_ratios_merged, encoding_iana
                ),
            )

        results.append(
            CharsetMatch(
                sequences,
                encoding_iana,
                mean_mess_ratio,
                bom_or_sig_available,
                cd_ratios_merged,
                decoded_payload,
            )
        )

        if (
            encoding_iana in [specified_encoding, "ascii", "utf_8"]
            and mean_mess_ratio < 0.1
        ):
            logger.debug(
                "Encoding detection: %s is most likely the one.", encoding_iana
            )
            if explain:
                logger.removeHandler(explain_handler)
                logger.setLevel(previous_logger_level)
            return CharsetMatches([results[encoding_iana]])

        if encoding_iana == sig_encoding:
            logger.debug(
                "Encoding detection: %s is most likely the one as we detected a BOM or SIG within "
                "the beginning of the sequence.",
                encoding_iana,
            )
            if explain:
                logger.removeHandler(explain_handler)
                logger.setLevel(previous_logger_level)
            return CharsetMatches([results[encoding_iana]])

    if len(results) == 0:
        if fallback_u8 or fallback_ascii or fallback_specified:
            logger.log(
                TRACE,
                "Nothing got out of the detection process. Using ASCII/UTF-8/Specified fallback.",
            )

        if fallback_specified:
            logger.debug(
                "Encoding detection: %s will be used as a fallback match",
                fallback_specified.encoding,
            )
            results.append(fallback_specified)
        elif (
            (fallback_u8 and fallback_ascii is None)
            or (
                fallback_u8
                and fallback_ascii
                and fallback_u8.fingerprint != fallback_ascii.fingerprint
            )
            or (fallback_u8 is not None)
        ):
            logger.debug("Encoding detection: utf_8 will be used as a fallback match")
            results.append(fallback_u8)
        elif fallback_ascii:
            logger.debug("Encoding detection: ascii will be used as a fallback match")
            results.append(fallback_ascii)

    if results:
        logger.debug(
            "Encoding detection: Found %s as plausible (best-candidate) for content. With %i alternatives.",
            results.best().encoding,  # type: ignore
            len(results) - 1,
        )
    else:
        logger.debug("Encoding detection: Unable to determine any suitable charset.")

    if explain:
        logger.removeHandler(explain_handler)
        logger.setLevel(previous_logger_level)

    return results
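

# Usage sketch for from_bytes (illustrative; the sample text and variable names
# below are assumptions for demonstration, not part of the library):
#
#     matches = from_bytes("Всеки човек има право на образование.".encode("cp1251"))
#     best = matches.best()      # CharsetMatch or None
#     if best is not None:
#         print(best.encoding)   # the winning IANA code page
#         print(str(best))       # the payload decoded with that code page
#
# The same call shape narrows the search or enables the TRACE output described
# in the docstring:
#
#     from_bytes(raw, cp_isolation=["cp1251", "utf_8"], explain=True)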


def from_fp(
    fp: BinaryIO,
    steps: int = 5,
    chunk_size: int = 512,
    threshold: float = 0.20,
    cp_isolation: Optional[List[str]] = None,
    cp_exclusion: Optional[List[str]] = None,
    preemptive_behaviour: bool = True,
    explain: bool = False,
    language_threshold: float = 0.1,
) -> CharsetMatches:
    """
    Same thing as the function from_bytes, but using a file pointer that is already ready.
    Will not close the file pointer.
    """
    return from_bytes(
        fp.read(),
        steps,
        chunk_size,
        threshold,
        cp_isolation,
        cp_exclusion,
        preemptive_behaviour,
        explain,
        language_threshold,
    )
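

# Usage sketch for from_fp (illustrative): any readable binary file-like object
# works, e.g. an open file handle or an io.BytesIO wrapper; the path below is a
# hypothetical example.
#
#     with open("unknown-encoding.txt", "rb") as fp:
#         print(from_fp(fp).best())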


def from_path(
    path: "PathLike[Any]",
    steps: int = 5,
    chunk_size: int = 512,
    threshold: float = 0.20,
    cp_isolation: Optional[List[str]] = None,
    cp_exclusion: Optional[List[str]] = None,
    preemptive_behaviour: bool = True,
    explain: bool = False,
    language_threshold: float = 0.1,
) -> CharsetMatches:
    """
    Same thing as the function from_bytes, but with one extra step: opening and reading the given
    file path in binary mode.
    Can raise IOError.
    """
    with open(path, "rb") as fp:
        return from_fp(
            fp,
            steps,
            chunk_size,
            threshold,
            cp_isolation,
            cp_exclusion,
            preemptive_behaviour,
            explain,
            language_threshold,
        )
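

# Minimal self-test sketch (an illustrative assumption, not part of the original
# module). Because of the relative imports above, run it as a module from within
# the installed package, e.g. `python -m charset_normalizer.api`.
if __name__ == "__main__":
    from io import BytesIO

    # "Comment ça va ?" encoded with cp1252; the detector should propose a
    # compatible single-byte code page (the exact label may vary, e.g. cp1252
    # or a sibling latin table).
    payload: bytes = "Comment ça va ?".encode("cp1252")

    best_guess = from_bytes(payload).best()
    if best_guess is not None:
        print("from_bytes ->", best_guess.encoding)

    # from_fp accepts any binary file-like object the same way.
    print("from_fp    ->", from_fp(BytesIO(payload)).best())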