test_binarizer.py

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import os
import typing as tp
import unittest
from tempfile import TemporaryDirectory

from fairseq.binarizer import (
    BinarizeSummary,
    FileBinarizer,
    VocabularyDatasetBinarizer,
)
from fairseq.data import Dictionary, indexed_dataset
from tests.utils import make_data, sizes
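
# `make_data` and `sizes` are shared test helpers; judging from their use
# below (not from upstream documentation), `make_data` builds token
# sequences, optionally writing them to `out_file`, and `sizes` returns
# the per-sequence lengths.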


def build_vocab(data: tp.List[tp.List[str]]) -> Dictionary:
    d = Dictionary()
    for s in data:
        for token in s:
            d.add_symbol(token)
    d.finalize()
    return d


class TestBinarizer(unittest.TestCase):
    def compare_ds_data(self, summary, data, prefix, impl, vocab):
        self.assertEqual(summary.num_seq, len(data))
        self.assertEqual(summary.num_tok, sum([len(s) for s in data]))
        dataset = indexed_dataset.make_dataset(prefix, impl)
        self.assertEqual(len(dataset), len(data))
        decoded = [vocab.string(dataset[i]).split() for i in range(0, len(dataset))]
        self.assertEqual(decoded, data)
        data_sizes = [i.item() for i in dataset.sizes]
        self.assertEqual(data_sizes, sizes(data))
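
    # VocabularyDatasetBinarizer appears to append EOS by default, which is
    # why the assertions below expect len(sentence) + 1 tokens -- an
    # assumption inferred from the later tests passing append_eos=False
    # explicitly.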
    def test_can_binarize_line(self):
        data = make_data(length=1)
        vocab = build_vocab(data)
        binarizer = VocabularyDatasetBinarizer(
            vocab,
        )
        sentence = data[0]
        summary = BinarizeSummary()
        tensor = binarizer.binarize_line(
            " ".join(sentence),
            summary,
        )
        self.assertEqual(len(tensor), len(sentence) + 1)
        self.assertEqual(summary.num_tok, len(sentence) + 1)
        self.assertEqual(summary.num_seq, 1)
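
    # offset_start=0 with offset_end=-1 appears to mean "binarize the whole
    # file as a single chunk" -- an assumption based on how this private
    # helper is exercised here, not on upstream documentation.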
    def test_can_binarize_file_chunk(self):
        # test without multiprocess logic
        with TemporaryDirectory() as dirname:
            raw_file = os.path.join(dirname, "raw1")
            prefix = os.path.join(dirname, "test1")
            impl = "mmap"

            data = make_data(out_file=raw_file)
            vocab = build_vocab(data)

            binarizer = VocabularyDatasetBinarizer(
                vocab,
                append_eos=False,
            )

            summary = FileBinarizer._binarize_chunk_and_finalize(
                binarizer,
                raw_file,
                offset_start=0,
                offset_end=-1,
                output_prefix=prefix,
                dataset_impl=impl,
                vocab_size=len(vocab),
            )

            self.compare_ds_data(summary, data, prefix, impl, vocab)
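
    # multiprocess_dataset is the public entry point; running it with
    # num_workers=1 and num_workers=3 over the same raw file should produce
    # datasets that both round-trip back to `data`, which is what the two
    # compare_ds_data calls verify.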
    def test_can_multiprocess(self):
        with TemporaryDirectory() as dirname:
            raw_file = os.path.join(dirname, "raw1")
            prefix = os.path.join(dirname, "test1")
            impl = "mmap"
            data = make_data(out_file=raw_file)
            vocab = build_vocab(data)
            binarizer = VocabularyDatasetBinarizer(
                vocab,
                append_eos=False,
            )
            # with one worker
            summary = FileBinarizer.multiprocess_dataset(
                raw_file,
                impl,
                binarizer,
                output_prefix=prefix,
                vocab_size=len(vocab),
                num_workers=1,
            )
            self.compare_ds_data(summary, data, prefix, impl, vocab)
            # with multiple workers
            prefix_multi = os.path.join(dirname, "test2")
            summary = FileBinarizer.multiprocess_dataset(
                raw_file,
                impl,
                binarizer,
                output_prefix=prefix_multi,
                vocab_size=len(vocab),
                num_workers=3,
            )
            self.compare_ds_data(summary, data, prefix_multi, impl, vocab)
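
# To run this module on its own (a standard unittest invocation, assuming the
# repository root is on PYTHONPATH):
#     python -m unittest tests.test_binarizer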