BuildLangModel.py

#!/bin/python3
# -*- coding: utf-8 -*-
# ##### BEGIN LICENSE BLOCK #####
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Jehan <jehan@girinstud.io>
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ##### END LICENSE BLOCK #####

# Third party modules.
import unicodedata
import subprocess
import wikipedia
import importlib
import optparse
import datetime
import operator
import requests
import sys
import re
import os

# Custom modules.
import charsets.db
from charsets.codepoints import *

# Command line processing.
usage = 'Usage: {} <LANG-CODE>\n' \
        '\nEx: `{} fr`'.format(__file__, __file__)

description = "Internal tool for uchardet to generate language data."
cmdline = optparse.OptionParser(usage, description=description)
cmdline.add_option('--max-page',
                   help='Maximum number of Wikipedia pages to parse (useful for debugging).',
                   action='store', type='int', dest='max_page', default=None)
cmdline.add_option('--max-depth',
                   help='Maximum depth when following links from start page (default: 2).',
                   action='store', type='int',
                   dest='max_depth', default=2)
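
# Illustrative invocation (assuming a langs/fr.py module exists, as in the
# uchardet sources):
#
#     ./BuildLangModel.py fr --max-page=100 --max-depth=1
#
# This would roughly cap the crawl at 100 pages, following links one level
# deep from the start pages.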

(options, langs) = cmdline.parse_args()
if len(langs) < 1:
    print("Please select at least one language code.\n")
    exit(1)
if len(langs) > 1:
    print("This script is meant to generate data for one language at a time.\n")
    exit(1)
lang = langs[0]

# Load the language data.
sys_path_backup = sys.path
current_dir = os.path.dirname(os.path.realpath(__file__))
sys.path = [current_dir + '/langs']
try:
    lang = importlib.import_module(lang.lower())
except ImportError:
    print('Unknown language code "{}": '
          'file "langs/{}.py" does not exist.'.format(lang, lang.lower()))
    exit(1)
sys.path = sys_path_backup

charsets = charsets.db.load(lang.charsets)

if not hasattr(lang, 'start_pages') or lang.start_pages is None or \
   lang.start_pages == []:
    # Let's start with the main page, assuming it should have links
    # to relevant pages. On a localized Wikipedia, this page is usually
    # redirected to a relevant page.
    print("Warning: no `start_pages` set for '{}'. Using ['Main_Page'].\n"
          "         If you don't get good data, it is advised to set a "
          "`start_pages` variable yourself.".format(lang.code))
    lang.start_pages = ['Main_Page']
if not hasattr(lang, 'wikipedia_code') or lang.wikipedia_code is None:
    lang.wikipedia_code = lang.code
if not hasattr(lang, 'clean_wikipedia_content') or lang.clean_wikipedia_content is None:
    lang.clean_wikipedia_content = None
if hasattr(lang, 'case_mapping'):
    lang.case_mapping = bool(lang.case_mapping)
else:
    lang.case_mapping = False
if not hasattr(lang, 'custom_case_mapping'):
    lang.custom_case_mapping = None
if not hasattr(lang, 'alphabet') or lang.alphabet is None:
    lang.alphabet = None
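
# The checks above define the interface a language module must provide. A
# minimal sketch of `langs/xx.py` (hypothetical values; `name`, `code`,
# `use_ascii` and `charsets` are the only attributes read unconditionally
# later in this script):
#
#     name = 'Example'
#     code = 'xx'
#     use_ascii = True
#     charsets = ['ISO-8859-1']
#     # Optional: alphabet, start_pages, wikipedia_code, case_mapping,
#     # custom_case_mapping, clean_wikipedia_content.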

def local_lowercase(text, lang):
    lowercased = ''
    for l in text:
        if lang.custom_case_mapping is not None and \
           l in lang.custom_case_mapping:
            lowercased += lang.custom_case_mapping[l]
        elif l.isupper() and \
             lang.case_mapping and \
             len(unicodedata.normalize('NFC', l.lower())) == 1:
            lowercased += l.lower()
        else:
            lowercased += l
    return lowercased
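
# For example, with case_mapping enabled and a hypothetical
# custom_case_mapping = {'İ': 'i'}, local_lowercase('İstanbul', lang)
# returns 'istanbul': the custom mapping wins, and the NFC length check
# deliberately skips characters like 'İ' whose str.lower() form ('i' plus
# a combining dot) does not compose back to a single codepoint.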

if lang.alphabet is not None:
    # Allowing to provide an alphabet in string format rather than list.
    lang.alphabet = list(lang.alphabet)
    if lang.use_ascii:
        lang.alphabet += [chr(l) for l in range(65, 91)] + \
                         [chr(l) for l in range(97, 123)]
    if lang.case_mapping or lang.custom_case_mapping is not None:
        lang.alphabet = [local_lowercase(l, lang) for l in lang.alphabet]
        #alphabet = []
        #for l in lang.alphabet:
            #if l.isupper() and \
               #lang.custom_case_mapping is not None and \
               #l in lang.custom_case_mapping:
                #alphabet.append(lang.custom_case_mapping[l])
            #elif l.isupper() and \
                 #lang.case_mapping and \
                 #len(unicodedata.normalize('NFC', l.lower())) == 1:
                #alphabet.append(l.lower())
            #else:
                #alphabet.append(l)
    lang.alphabet = list(set(lang.alphabet))
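
# For example (hypothetical config), with use_ascii = True, case_mapping =
# True and alphabet = 'éàç', the final list holds the 26 lowercase ASCII
# letters plus 'é', 'à' and 'ç': uppercase letters are lowercased by
# local_lowercase() and duplicates are removed by the set() round-trip.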

# Starting processing.
wikipedia.set_lang(lang.wikipedia_code)

visited_pages = []

# The full list of letter characters.
# The key is the unicode codepoint,
# and the value is the occurrence count.
characters = {}
# Sequences of letters.
# The key is the couple (char1, char2) of unicode codepoints,
# the value is the occurrence count.
sequences = {}

prev_char = None

def process_text(content, lang):
    global charsets
    global characters
    global sequences
    global prev_char

    if lang.clean_wikipedia_content is not None:
        content = lang.clean_wikipedia_content(content)
    # Clean out the Wikipedia syntax for titles.
    content = re.sub(r'(=+) *([^=]+) *\1',
                     r'\2', content)
    # Collapse multiple spaces. Newlines and such are normalized to spaces,
    # since they play essentially the same role for uchardet's purposes.
    content = re.sub(r'\s+', ' ', content)

    if lang.case_mapping or lang.custom_case_mapping is not None:
        content = local_lowercase(content, lang)

    # In Python 3, strings are Unicode; iterating over one yields its
    # characters one by one.
    for char in content:
        is_letter = False
        if ord(char) in characters:
            characters[ord(char)] += 1
            is_letter = True
        else:
            # We save the character if it exists in at least one of the
            # language encodings and it's not a special character.
            for charset in charsets:
                # Does the character exist in the charset?
                try:
                    codepoint = char.encode(charset, 'ignore')
                except LookupError:
                    # Unknown encoding. Use iconv from the command line instead.
                    try:
                        call = subprocess.Popen(['iconv', '-f', 'UTF-8', '-t', charset],
                                                stdin=subprocess.PIPE,
                                                stdout=subprocess.PIPE,
                                                stderr=subprocess.DEVNULL)
                        if call.poll() is not None:
                            (_, error) = call.communicate(input=b'')
                            print('Error: `iconv` ended with error "{}".\n'.format(error))
                            exit(1)
                        (codepoint, _) = call.communicate(input=char.encode('UTF-8'))
                    except FileNotFoundError:
                        print('Error: "{}" is not a charset supported by Python '
                              'and `iconv` is not installed.\n'.format(charset))
                        exit(1)
                if codepoint == b'':
                    continue
                # ord() on a length-1 bytes object returns the byte value,
                # which here is the codepoint of the character in the
                # target charset.
                codepoint = ord(codepoint)
                if charsets[charset].charmap[codepoint] == LET:
                    characters[ord(char)] = 1
                    is_letter = True
                    break
        if is_letter:
            if prev_char is not None:
                if (prev_char, ord(char)) in sequences:
                    sequences[(prev_char, ord(char))] += 1
                else:
                    sequences[(prev_char, ord(char))] = 1
            prev_char = ord(char)
        else:
            prev_char = None
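
# Worked example: after process_text('ab ab', lang) with a charset that
# maps the ASCII letters to LET and the space to SYM, `characters` holds
# {97: 2, 98: 2} and `sequences` holds {(97, 98): 2}. The space is not a
# letter, so it resets `prev_char` and no pair is counted across words.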

def visit_pages(titles, depth, lang, logfd):
    global visited_pages
    global options

    if len(titles) == 0:
        return

    next_titles = []
    for title in titles:
        if options.max_page is not None and \
           len(visited_pages) > options.max_page:
            return
        if title in visited_pages:
            continue
        visited_pages += [title]
        try:
            page = wikipedia.page(title)
        except (wikipedia.exceptions.PageError,
                wikipedia.exceptions.DisambiguationError):
            # Let's just discard a page when I get an exception.
            print("Discarding page {}.\n".format(title))
            continue
        logfd.write("\n{} (revision {})".format(title, page.revision_id))

        process_text(page.content, lang)
        try:
            next_titles += page.links
        except KeyError:
            pass

    if depth >= options.max_depth:
        return

    visit_pages(next_titles, depth + 1, lang, logfd)
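
# Note: this is a breadth-first crawl. All titles of the current level are
# processed before the accumulated `next_titles` are visited at depth + 1,
# until options.max_depth (or options.max_page) stops the recursion.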

language_c = lang.name.replace('-', '_').title()
build_log = current_dir + '/BuildLangModelLogs/Lang{}Model.log'.format(language_c)
logfd = open(build_log, 'w')
logfd.write('= Logs of language model for {} ({}) =\n'.format(lang.name, lang.code))
logfd.write('\n- Generated by {}'.format(os.path.basename(__file__)))
logfd.write('\n- Started: {}'.format(str(datetime.datetime.now())))
logfd.write('\n- Maximum depth: {}'.format(options.max_depth))
if options.max_page is not None:
    logfd.write('\n- Max number of pages: {}'.format(options.max_page))
logfd.write('\n\n== Parsed pages ==\n')
try:
    visit_pages(lang.start_pages, 0, lang, logfd)
except requests.exceptions.ConnectionError:
    print('Error: connection to Wikipedia failed. Aborting.\n')
    exit(1)
logfd.write('\n\n== End of Parsed pages ==')
logfd.write('\n\n- Wikipedia parsing ended at: {}\n'.format(str(datetime.datetime.now())))

########### CHARACTERS ###########

# Character ratios.
ratios = {}
n_char = len(characters)
occurrences = sum(characters.values())
logfd.write("\n{} characters appeared {} times.\n".format(n_char, occurrences))
for char in characters:
    ratios[char] = characters[char] / occurrences
    #logfd.write("Character '{}' usage: {} ({} %)\n".format(chr(char),
    #                                                       characters[char],
    #                                                       ratios[char] * 100))

sorted_ratios = sorted(ratios.items(), key=operator.itemgetter(1),
                       reverse=True)
# Accumulated ratios of the frequent chars.
accumulated_ratios = 0

# If there is no alphabet defined, we just use the first 64 letters, which
# was the original default.
# If there is an alphabet, we make sure all the alphabet characters are in
# the frequent list, and we stop there. There may therefore be more or fewer
# than 64 frequent characters depending on the language.
if lang.alphabet is None:
    freq_count = 64
else:
    freq_count = 0
    for order, (char, ratio) in enumerate(sorted_ratios):
        if len(lang.alphabet) == 0:
            break
        if chr(char) in lang.alphabet:
            lang.alphabet.remove(chr(char))
        freq_count += 1
    else:
        if len(lang.alphabet) > 0:
            print("Error: alphabet characters are absent from data collection."
                  "\n       Please check the configuration or the data."
                  "\n       Missing characters: {}".format(", ".join(lang.alphabet)))
            exit(1)

logfd.write('\nFirst {} characters:'.format(freq_count))
for order, (char, ratio) in enumerate(sorted_ratios):
    if order >= freq_count:
        break
    logfd.write("\n[{:2}] Char {}: {} %".format(order, chr(char), ratio * 100))
    accumulated_ratios += ratio

logfd.write("\n\nThe first {} characters have an accumulated ratio "
            "of {}.\n".format(freq_count, accumulated_ratios))
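
# Continuing the toy example above: with only 'ab ab' processed,
# sorted_ratios would be [(97, 0.5), (98, 0.5)] (in some order), i.e.
# orders 0 and 1 with an accumulated ratio of 1.0.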

with open(current_dir + '/header-template.cpp', 'r') as header_fd:
    c_code = header_fd.read()

c_code += '\n/********* Language model for: {} *********/\n\n'.format(lang.name)
c_code += '/**\n * Generated by {}\n'.format(os.path.basename(__file__))
c_code += ' * On: {}\n'.format(str(datetime.datetime.now()))
c_code += ' **/\n'
c_code += \
"""
/* Character Mapping Table:
 * ILL: illegal character.
 * CTR: control character specific to the charset.
 * RET: carriage/return.
 * SYM: symbol (punctuation) that does not belong to a word.
 * NUM: 0 - 9.
 *
 * Other characters are ordered by probabilities
 * (0 is the most common character in the language).
 *
 * Orders are generic to a language. So the codepoint with order X in
 * CHARSET1 maps to the same character as the codepoint with the same
 * order X in CHARSET2 for the same language.
 * As such, it is possible to get missing orders. For instance the
 * ligature of 'o' and 'e' exists in ISO-8859-15 but not in ISO-8859-1
 * even though they are both used for French. Same for the euro sign.
 */
"""

for charset in charsets:
    charset_c = charset.replace('-', '_').title()
    CTOM_str = 'static const unsigned char {}_CharToOrderMap[]'.format(charset_c)
    CTOM_str += ' =\n{'
    for line in range(0, 16):
        CTOM_str += '\n  '
        for column in range(0, 16):
            cp = line * 16 + column
            cp_type = charsets[charset].charmap[cp]
            if cp_type == ILL:
                CTOM_str += 'ILL,'
            elif cp_type == RET:
                CTOM_str += 'RET,'
            elif cp_type == CTR:
                CTOM_str += 'CTR,'
            elif cp_type == SYM:
                CTOM_str += 'SYM,'
            elif cp_type == NUM:
                CTOM_str += 'NUM,'
            else:  # LET
                try:
                    uchar = bytes([cp]).decode(charset)
                except UnicodeDecodeError:
                    print('Unknown character 0X{:X} in {}.'.format(cp, charset))
                    print('Please verify your charset specification.\n')
                    exit(1)
                except LookupError:
                    # Unknown encoding. Use iconv instead.
                    try:
                        call = subprocess.Popen(['iconv', '-t', 'UTF-8', '-f', charset],
                                                stdin=subprocess.PIPE,
                                                stdout=subprocess.PIPE,
                                                stderr=subprocess.PIPE)
                        if call.poll() is not None:
                            (_, error) = call.communicate(input=b'')
                            print('Error: `iconv` ended with error "{}".\n'.format(error))
                            exit(1)
                        (uchar, _) = call.communicate(input=bytes([cp]))
                        uchar = uchar.decode('UTF-8')
                    except FileNotFoundError:
                        print('Error: "{}" is not a charset supported by Python '
                              'and `iconv` is not installed.\n'.format(charset))
                        exit(1)
                #if lang.case_mapping and uchar.isupper() and \
                   #len(unicodedata.normalize('NFC', uchar.lower())) == 1:
                # Unless we encounter special cases of characters with no
                # composed lowercase, we lowercase it.
                if lang.case_mapping or lang.custom_case_mapping is not None:
                    uchar = local_lowercase(uchar, lang)
                for order, (char, ratio) in enumerate(sorted_ratios):
                    if char == ord(uchar):
                        CTOM_str += '{:3},'.format(min(249, order))
                        break
                else:
                    # XXX: we must make sure the character order does not go
                    # over the special characters (250 currently). This may
                    # actually happen when building a model for a language
                    # writable with many different encodings. So let's just
                    # ceil the order value at 249 max.
                    # It may be an interesting alternative to add another
                    # constant for any character with an order > freqCharCount.
                    # Maybe IRR (irrelevant character) or simply CHR.
                    CTOM_str += '{:3},'.format(min(249, n_char))
                    n_char += 1
        CTOM_str += ' /* {:X}X */'.format(line)
    CTOM_str += '\n};\n/*'
    CTOM_str += 'X0  X1  X2  X3  X4  X5  X6  X7  X8  X9  XA  XB  XC  XD  XE  XF'
    CTOM_str += ' */\n\n'
    c_code += CTOM_str
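
# For reference, each generated table looks roughly like this (abridged,
# illustrative output for ISO-8859-1; the numeric orders depend on the
# collected data):
#
#   static const unsigned char Iso_8859_1_CharToOrderMap[] =
#   {
#     CTR,CTR,CTR,CTR, ...             /* 0X */
#     ...
#     SYM,  0,  1,  2, ...             /* 6X */
#     ...
#   };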

########### SEQUENCES ###########

ratios = {}
occurrences = sum(sequences.values())
ratio_512 = 0
ratio_1024 = 0

sorted_seqs = sorted(sequences.items(), key=operator.itemgetter(1),
                     reverse=True)
for order, ((c1, c2), count) in enumerate(sorted_seqs):
    if order < 512:
        ratio_512 += count
    elif order < 1024:
        ratio_1024 += count
    else:
        break
ratio_512 /= occurrences
ratio_1024 /= occurrences
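
# For instance, if the 512 most frequent pairs account for 85% of all pair
# occurrences and the next 512 for another 10%, then ratio_512 = 0.85,
# ratio_1024 = 0.10, and the remaining 5% is "the rest" (made-up numbers,
# for illustration only).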

logfd.write("\n{} sequences found.\n".format(len(sorted_seqs)))

c_code += """
/* Model Table:
 * Total sequences: {}
 * First 512 sequences: {}
 * Next 512 sequences (512-1024): {}
 * Rest: {}
 * Negative sequences: TODO""".format(len(sorted_seqs),
                                      ratio_512,
                                      ratio_1024,
                                      1 - ratio_512 - ratio_1024)
logfd.write("\nFirst 512 (typical positive ratio): {}".format(ratio_512))
logfd.write("\nNext 512 (512-1024): {}".format(ratio_1024))
logfd.write("\nRest: {}".format(1 - ratio_512 - ratio_1024))

c_code += "\n */\n"

LM_str = 'static const PRUint8 {}LangModel[]'.format(language_c)
LM_str += ' =\n{'
for line in range(0, freq_count):
    LM_str += '\n  '
    for column in range(0, freq_count):
        # Let's not make the lines too long.
        if freq_count > 40 and column == int(freq_count / 2):
            LM_str += '\n  '
        first_order = int(line)
        second_order = column
        if first_order < len(sorted_ratios) and second_order < len(sorted_ratios):
            (first_char, _) = sorted_ratios[first_order]
            (second_char, _) = sorted_ratios[second_order]
            if (first_char, second_char) in sequences:
                for order, (seq, _) in enumerate(sorted_seqs):
                    if seq == (first_char, second_char):
                        if order < 512:
                            LM_str += '3,'
                        elif order < 1024:
                            LM_str += '2,'
                        else:
                            LM_str += '1,'
                        break
                else:
                    # Cannot happen: the pair was found in `sequences`,
                    # so it must be in `sorted_seqs` as well.
                    LM_str += '0,'
            else:
                LM_str += '0,'
        else:
            # It may indeed happen that we find fewer than 64 letters used
            # for a given language.
            LM_str += '0,'
LM_str += '\n};\n'
c_code += LM_str
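
# In other words, LangModel[i * freq_count + j] classifies the bigram made
# of the i-th and j-th most frequent characters: 3 for the 512 most
# frequent sequences, 2 for the next 512, 1 for the rest, and 0 for pairs
# never seen in the corpus.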

for charset in charsets:
    charset_c = charset.replace('-', '_').title()
    SM_str = '\n\nconst SequenceModel {}{}Model ='.format(charset_c, language_c)
    SM_str += '\n{\n  '
    SM_str += '{}_CharToOrderMap,\n  {}LangModel,'.format(charset_c, language_c)
    SM_str += '\n  {},'.format(freq_count)
    SM_str += '\n  (float){},'.format(ratio_512)
    SM_str += '\n  {},'.format('PR_TRUE' if lang.use_ascii else 'PR_FALSE')
    SM_str += '\n  "{}"'.format(charset)
    SM_str += '\n};'
    c_code += SM_str
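
# The initializer above must match the field order of the SequenceModel
# struct on the C++ side (inferred from the generated code): char-to-order
# map, language model, frequent character count, typical positive ratio,
# whether ASCII letters are kept, and the charset name.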

lang_model_file = current_dir + '/../src/LangModels/Lang{}Model.cpp'.format(language_c)
with open(lang_model_file, 'w') as cpp_fd:
    cpp_fd.write(c_code)

logfd.write('\n\n- Processing end: {}\n'.format(str(datetime.datetime.now())))
logfd.close()
print("The following language model file has been generated: {}"
      "\nThe build log is available in: {}"
      "\nTest them and commit them.".format(lang_model_file, build_log))