import csv
import io
import json
import os
from typing import Any, Dict, Iterator, List, Tuple

import chardet
import pyexcel
import pyexcel.exceptions
from chardet import UniversalDetector
from seqeval.scheme import BILOU, IOB2, IOBES, IOE2, Tokens

from .exceptions import FileParseException
from .readers import DEFAULT_LABEL_COLUMN, DEFAULT_TEXT_COLUMN, Parser

DEFAULT_ENCODING = 'Auto'


def detect_encoding(filename: str, buffer_size: int = io.DEFAULT_BUFFER_SIZE) -> str:
    """Detects character encoding automatically.

    If you want to know the supported encodings, please see the following document:
    https://chardet.readthedocs.io/en/latest/supported-encodings.html

    Args:
        filename: the filename for detecting the encoding.
        buffer_size: the buffer size to read file contents incrementally.

    Returns:
        The character encoding.
    """
    # For a small file, read the whole content at once.
    if os.path.getsize(filename) < buffer_size:
        with open(filename, 'rb') as f:
            detected = chardet.detect(f.read())
        # chardet may report None when it cannot decide, so fall back to utf-8.
        return detected.get('encoding') or 'utf-8'

    # For a large file, call the Universal Encoding Detector incrementally.
    # It will stop as soon as it is confident enough to report its results.
    # See: https://chardet.readthedocs.io/en/latest/usage.html
    with open(filename, 'rb') as f:
        detector = UniversalDetector()
        while True:
            binary = f.read(buffer_size)
            detector.feed(binary)
            if binary == b'':
                break
            if detector.done:
                break
        if detector.done:
            return detector.result['encoding']
        else:
            return 'utf-8'


def decide_encoding(filename: str, encoding: str) -> str:
    """Decides the character encoding to use.

    If the encoding is DEFAULT_ENCODING, detects it automatically.
    Otherwise, returns it as is.

    Args:
        filename: The filename for deciding the encoding.
        encoding: The specified encoding.

    Returns:
        The character encoding.
    """
    if encoding == DEFAULT_ENCODING:
        return detect_encoding(filename)
    else:
        return encoding
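
# A minimal usage sketch for the two helpers above ('data.txt' is a
# hypothetical path, not part of this module):
#
#     decide_encoding('data.txt', 'utf-8')           # returned as is
#     decide_encoding('data.txt', DEFAULT_ENCODING)  # falls back to detect_encoding()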


class LineReader:
    """LineReader is a helper class to read a file line by line.

    Attributes:
        filename: The filename to read.
        encoding: The character encoding.
    """

    def __init__(self, filename: str, encoding: str = DEFAULT_ENCODING):
        self.filename = filename
        self.encoding = encoding

    def __iter__(self) -> Iterator[str]:
        encoding = decide_encoding(self.filename, self.encoding)
        with open(self.filename, encoding=encoding) as f:
            for line in f:
                yield line.rstrip()


class PlainParser(Parser):
    """PlainParser is a parser that simply returns an empty dictionary.

    This is for a task without any text.
    """

    def parse(self, filename: str) -> Iterator[Dict[Any, Any]]:
        yield {}


class LineParser(Parser):
    """LineParser is a parser to read a file line by line.

    Attributes:
        encoding: The character encoding.
    """

    def __init__(self, encoding: str = DEFAULT_ENCODING, **kwargs):
        self.encoding = encoding

    def parse(self, filename: str) -> Iterator[Dict[Any, Any]]:
        reader = LineReader(filename, self.encoding)
        for line in reader:
            yield {DEFAULT_TEXT_COLUMN: line}


class TextFileParser(Parser):
    """TextFileParser is a parser to read an entire file content.

    Attributes:
        encoding: The character encoding.
    """

    def __init__(self, encoding: str = DEFAULT_ENCODING, **kwargs):
        self.encoding = encoding

    def parse(self, filename: str) -> Iterator[Dict[Any, Any]]:
        encoding = decide_encoding(filename, self.encoding)
        with open(filename, encoding=encoding) as f:
            yield {DEFAULT_TEXT_COLUMN: f.read()}


class CSVParser(Parser):
    """CSVParser is a parser to read a csv file and return its rows.

    Attributes:
        encoding: The character encoding.
        delimiter: A one-character string used to separate fields. It defaults to ','.
    """

    def __init__(self, encoding: str = DEFAULT_ENCODING, delimiter: str = ',', **kwargs):
        self.encoding = encoding
        self.delimiter = delimiter

    def parse(self, filename: str) -> Iterator[Dict[Any, Any]]:
        encoding = decide_encoding(filename, self.encoding)
        with open(filename, encoding=encoding) as f:
            reader = csv.DictReader(f, delimiter=self.delimiter)
            for row in reader:
                yield row
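
# A minimal usage sketch ('examples.csv' is a hypothetical file whose header
# row provides the dictionary keys):
#
#     # examples.csv
#     # text,label
#     # I really enjoyed this restaurant.,positive
#
#     parser = CSVParser(encoding='utf-8')
#     for row in parser.parse('examples.csv'):
#         print(row)  # {'text': 'I really enjoyed this restaurant.', 'label': 'positive'}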


class JSONParser(Parser):
    """JSONParser is a parser to read a json file and return its rows.

    Attributes:
        encoding: The character encoding.
    """

    def __init__(self, encoding: str = DEFAULT_ENCODING, **kwargs):
        self.encoding = encoding
        self._errors = []

    def parse(self, filename: str) -> Iterator[Dict[Any, Any]]:
        encoding = decide_encoding(filename, self.encoding)
        with open(filename, encoding=encoding) as f:
            try:
                rows = json.load(f)
                for row in rows:
                    yield row
            except json.decoder.JSONDecodeError as e:
                error = FileParseException(filename, line_num=1, message=str(e))
                self._errors.append(error)

    @property
    def errors(self) -> List[FileParseException]:
        return self._errors


class JSONLParser(Parser):
    """JSONLParser is a parser to read a JSONL file and return its rows.

    Attributes:
        encoding: The character encoding.
    """

    def __init__(self, encoding: str = DEFAULT_ENCODING, **kwargs):
        self.encoding = encoding
        self._errors = []

    def parse(self, filename: str) -> Iterator[Dict[Any, Any]]:
        reader = LineReader(filename, self.encoding)
        for line_num, line in enumerate(reader, start=1):
            try:
                yield json.loads(line)
            except json.decoder.JSONDecodeError as e:
                error = FileParseException(filename, line_num, str(e))
                self._errors.append(error)

    @property
    def errors(self) -> List[FileParseException]:
        return self._errors
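
# A minimal usage sketch ('examples.jsonl' is a hypothetical file; lines that
# fail to decode are skipped and reported via parser.errors):
#
#     # examples.jsonl
#     # {"text": "I really enjoyed this restaurant."}
#     # {"text": "Too salty for my taste."}
#
#     parser = JSONLParser()
#     records = list(parser.parse('examples.jsonl'))
#     print(records[0])     # {'text': 'I really enjoyed this restaurant.'}
#     print(parser.errors)  # [] when every line is valid JSON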


class ExcelParser(Parser):
    """ExcelParser is a parser to read an excel file."""

    def __init__(self, **kwargs):
        self._errors = []

    def parse(self, filename: str) -> Iterator[Dict[Any, Any]]:
        rows = pyexcel.iget_records(file_name=filename)
        try:
            for row in rows:
                yield row
        except pyexcel.exceptions.FileTypeNotSupported as e:
            error = FileParseException(filename, line_num=1, message=str(e))
            self._errors.append(error)

    @property
    def errors(self) -> List[FileParseException]:
        return self._errors


class FastTextParser(Parser):
    """FastTextParser is a parser to read the fastText format and return a text and labels.

    The example format is as follows:

        __label__positive I really enjoyed this restaurant.

    This format expects each category first, prefixed with `__label__`,
    followed by the input text, as in the example above.

    Attributes:
        encoding: The character encoding.
        label: The label prefix. It defaults to `__label__`.
    """

    def __init__(self, encoding: str = DEFAULT_ENCODING, label: str = '__label__', **kwargs):
        self.encoding = encoding
        self.label = label

    def parse(self, filename: str) -> Iterator[Dict[Any, Any]]:
        reader = LineReader(filename, self.encoding)
        for line in reader:
            labels = []
            tokens = []
            for token in line.rstrip().split(' '):
                if token.startswith(self.label):
                    label_name = token[len(self.label):]
                    labels.append(label_name)
                else:
                    tokens.append(token)
            text = ' '.join(tokens)
            yield {DEFAULT_TEXT_COLUMN: text, DEFAULT_LABEL_COLUMN: labels}
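
# A minimal usage sketch (assumes DEFAULT_TEXT_COLUMN and DEFAULT_LABEL_COLUMN
# resolve to 'text' and 'label'; 'reviews.txt' is a hypothetical file holding
# the single line shown in the docstring above):
#
#     parser = FastTextParser()
#     print(next(parser.parse('reviews.txt')))
#     # {'text': 'I really enjoyed this restaurant.', 'label': ['positive']}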


class CoNLLParser(Parser):
    """CoNLLParser is a parser to read a CoNLL-like format and return a text and labels.

    The example format is as follows:

        EU B-ORG
        rejects O
        German B-MISC
        call O
        to O
        boycott O
        British B-MISC
        lamb O
        . O

        Peter B-PER
        Blackburn I-PER

    This format expects a token in the first column and a tag in the second column,
    separated by a tab. Each example is separated from the next by a blank line.

    Attributes:
        encoding: The character encoding.
        delimiter: The string used to join tokens into the text. It defaults to ' '.
        scheme: The tagging scheme. It supports `IOB2`, `IOE2`, `IOBES`, and `BILOU`.
    """

    def __init__(self, encoding: str = DEFAULT_ENCODING, delimiter: str = ' ', scheme: str = 'IOB2', **kwargs):
        self.encoding = encoding
        self.delimiter = delimiter
        mapping = {
            'IOB2': IOB2,
            'IOE2': IOE2,
            'IOBES': IOBES,
            'BILOU': BILOU
        }
        self._errors = []
        if scheme in mapping:
            self.scheme = mapping[scheme]
        else:
            self.scheme = None

    @property
    def errors(self) -> List[FileParseException]:
        return self._errors

    def parse(self, filename: str) -> Iterator[Dict[Any, Any]]:
        if not self.scheme:
            message = 'The specified scheme is not supported.'
            error = FileParseException(filename, line_num=1, message=message)
            self._errors.append(error)
            return

        reader = LineReader(filename, self.encoding)
        words, tags = [], []
        for line_num, line in enumerate(reader, start=1):
            line = line.rstrip()
            if line:
                tokens = line.split('\t')
                if len(tokens) != 2:
                    message = 'A line must be separated by a tab and have two columns.'
                    self._errors.append(FileParseException(filename, line_num, message))
                    return
                word, tag = tokens
                words.append(word)
                tags.append(tag)
            else:
                # A blank line marks the end of the current example.
                yield self.create_record(tags, words)
                words, tags = [], []
        if words:
            yield self.create_record(tags, words)

    def create_record(self, tags, words):
        text = self.delimiter.join(words)
        labels = self.align_span(words, tags)
        return {DEFAULT_TEXT_COLUMN: text, DEFAULT_LABEL_COLUMN: labels}

    def align_span(self, words: List[str], tags: List[str]) -> List[Tuple[int, int, str]]:
        # Convert token-level tags into character-level (start, end, tag) spans
        # on the text produced by joining `words` with `self.delimiter`.
        tokens = Tokens(tags, self.scheme)
        labels = []
        for entity in tokens.entities:
            text = self.delimiter.join(words[:entity.start])
            start = len(text) + len(self.delimiter) if text else len(text)
            chunk = words[entity.start: entity.end]
            text = self.delimiter.join(chunk)
            end = start + len(text)
            labels.append((start, end, entity.tag))
        return labels
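
# A minimal usage sketch (assumes DEFAULT_TEXT_COLUMN and DEFAULT_LABEL_COLUMN
# resolve to 'text' and 'label'; 'ner.conll' is a hypothetical tab-separated
# file holding the first sentence shown in the docstring above):
#
#     parser = CoNLLParser(scheme='IOB2')
#     record = next(parser.parse('ner.conll'))
#     print(record['text'])   # 'EU rejects German call to boycott British lamb .'
#     print(record['label'])  # [(0, 2, 'ORG'), (11, 17, 'MISC'), (34, 41, 'MISC')]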