# Distribute tokenized documents read from stdin across JSONL shard files
# and print summary statistics about the resulting corpus as JSON.
import sys
import math
import re
import random
import json
from pathlib import Path

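# Number of output shards; each emitted record lands in one of them at random.
# Input documents open with a header like <doc id="2109_0.txt_42">; the regex
# captures the source file name without the trailing document counter.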
__FILE_COUNT__ = 60
doc_regex = re.compile(r'<doc id="([^"]+)_\d+">')

file_names = []
file_pointers = {}
record_counter = {}

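# Running sums for single-pass mean/stddev of tokens per line and
# characters per token.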
line_counter = 0
sum_token_count = 0
sum_token_sq = 0
sum_char_count = 0
sum_char_sq = 0
source_dist = {}
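# Map each input file name to a human-readable source label; files belonging
# to the same source (e.g. the OSCAR dumps) share one label.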
dataset_names = {
    "2109_0.txt": "oscar_2109",
    "2109_1.txt": "oscar_2109",
    "2109_2.txt": "oscar_2109",
    "2109_3.txt": "oscar_2109",
    "2109_4.txt": "oscar_2109",
    "2109_5.txt": "oscar_2109",
    "2109_6.txt": "oscar_2109",
    "2109_7.txt": "oscar_2109",
    "2109_8.txt": "oscar_2109",
    "2109_9.txt": "oscar_2109",
    "2201_0.txt": "oscar_2201",
    "2201_1.txt": "oscar_2201",
    "2201_2.txt": "oscar_2201",
    "2201_3.txt": "oscar_2201",
    "2201_4.txt": "oscar_2201",
    "2201_5.txt": "oscar_2201",
    "2201_6.txt": "oscar_2201",
    "2201_7.txt": "oscar_2201",
    "2301_0.txt": "oscar_2301",
    "2301_10.txt": "oscar_2301",
    "2301_11.txt": "oscar_2301",
    "2301_1.txt": "oscar_2301",
    "2301_2.txt": "oscar_2301",
    "2301_3.txt": "oscar_2301",
    "2301_4.txt": "oscar_2301",
    "2301_5.txt": "oscar_2301",
    "2301_6.txt": "oscar_2301",
    "2301_7.txt": "oscar_2301",
    "2301_8.txt": "oscar_2301",
    "2301_9.txt": "oscar_2301",
    "commoncrawl_fa_merged_aa.txt": "cc",
    "commoncrawl_fa_merged_ab.txt": "cc",
    "commoncrawl_fa_merged_ac.txt": "cc",
    "commoncrawl_fa_merged_ad.txt": "cc",
    "commoncrawl_fa_merged_ae.txt": "cc",
    "commoncrawl_fa_merged_af.txt": "cc",
    "commoncrawl_fa_merged_ag.txt": "cc",
    "commoncrawl_fa_merged_ah.txt": "cc",
    "commoncrawl_fa_merged_ai.txt": "cc",
    "commoncrawl_fa_merged_aj.txt": "cc",
    "fas-ir_web-public_2019_100K-sentences.txt": "web-2019_100K",
    "fas-ir_web-public_2019_10K-sentences.txt": "web-2019_10K",
    "fas-ir_web-public_2019_1M-sentences.txt": "web-2019_1M",
    "fas-ir_web-public_2019_300K-sentences.txt": "web-2019_300K",
    "fas-ir_web-public_2019_30K-sentences.txt": "web-2019_30K",
    "fas_news_2019_100K-sentences.txt": "news_2019_100K",
    "fas_news_2019_10K-sentences.txt": "news_2019_10K",
    "fas_news_2019_300K-sentences.txt": "news_2019_300K",
    "fas_news_2019_30K-sentences.txt": "news_2019_30K",
    "fas_news_2020_100K-sentences.txt": "news_2020_100K",
    "fas_news_2020_10K-sentences.txt": "news_2020_10K",
    "fas_news_2020_300K-sentences.txt": "news_2020_300K",
    "fas_news_2020_30K-sentences.txt": "news_2020_30K",
    "fas_newscrawl_2011_100K-sentences.txt": "newscrawl_2011_100K",
    "fas_newscrawl_2011_10K-sentences.txt": "newscrawl_2011_10K",
    "fas_newscrawl_2011_1M-sentences.txt": "newscrawl_2011_1M",
    "fas_newscrawl_2011_300K-sentences.txt": "newscrawl_2011_300K",
    "fas_newscrawl_2011_30K-sentences.txt": "newscrawl_2011_30K",
    "fas_newscrawl_2015_100K-sentences.txt": "newscrawl_2015_100K",
    "fas_newscrawl_2015_10K-sentences.txt": "newscrawl_2015_10K",
    "fas_newscrawl_2015_1M-sentences.txt": "newscrawl_2015_1M",
    "fas_newscrawl_2015_300K-sentences.txt": "newscrawl_2015_300K",
    "fas_newscrawl_2015_30K-sentences.txt": "newscrawl_2015_30K",
    "fas_newscrawl_2016_100K-sentences.txt": "newscrawl_2016_100K",
    "fas_newscrawl_2016_10K-sentences.txt": "newscrawl_2016_10K",
    "fas_newscrawl_2016_1M-sentences.txt": "newscrawl_2016_1M",
    "fas_newscrawl_2016_300K-sentences.txt": "newscrawl_2016_300K",
    "fas_newscrawl_2016_30K-sentences.txt": "newscrawl_2016_30K",
    "fas_newscrawl_2017_100K-sentences.txt": "newscrawl_2017_100K",
    "fas_newscrawl_2017_10K-sentences.txt": "newscrawl_2017_10K",
    "fas_newscrawl_2017_1M-sentences.txt": "newscrawl_2017_1M",
    "fas_newscrawl_2017_300K-sentences.txt": "newscrawl_2017_300K",
    "fas_newscrawl_2017_30K-sentences.txt": "newscrawl_2017_30K",
    "fas_newscrawl_2019_100K-sentences.txt": "newscrawl_2019_100K",
    "fas_newscrawl_2019_10K-sentences.txt": "newscrawl_2019_10K",
    "fas_newscrawl_2019_1M-sentences.txt": "newscrawl_2019_1M",
    "fas_newscrawl_2019_300K-sentences.txt": "newscrawl_2019_300K",
    "fas_newscrawl_2019_30K-sentences.txt": "newscrawl_2019_30K",
    "fas_wikipedia_2010_100K-sentences.txt": "wikipedia_2010_100K",
    "fas_wikipedia_2010_10K-sentences.txt": "wikipedia_2010_10K",
    "fas_wikipedia_2010_300K-sentences.txt": "wikipedia_2010_300K",
    "fas_wikipedia_2010_30K-sentences.txt": "wikipedia_2010_30K",
    "fas_wikipedia_2012_100K-sentences.txt": "wikipedia_2012_100K",
    "fas_wikipedia_2012_10K-sentences.txt": "wikipedia_2012_10K",
    "fas_wikipedia_2012_300K-sentences.txt": "wikipedia_2012_300K",
    "fas_wikipedia_2012_30K-sentences.txt": "wikipedia_2012_30K",
    "fas_wikipedia_2014_100K-sentences.txt": "wikipedia_2014_100K",
    "fas_wikipedia_2014_10K-sentences.txt": "wikipedia_2014_10K",
    "fas_wikipedia_2014_1M-sentences.txt": "wikipedia_2014_1M",
    "fas_wikipedia_2014_300K-sentences.txt": "wikipedia_2014_300K",
    "fas_wikipedia_2014_30K-sentences.txt": "wikipedia_2014_30K",
    "poems_merged.txt": "poems",
    "TEP_fa.txt": "tep",
    "voa_persian_2003_2008_cleaned.txt": "voa",
    "w2c_merged.txt": "w2c",
}

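# Update the running sums for one emitted line; the standard deviation is
# later recovered via the identity Var(x) = E[x^2] - E[x]^2.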
def stats(tokens):
    global line_counter, sum_token_count, sum_token_sq, sum_char_count, sum_char_sq
    line_counter += 1
    sum_token_count += len(tokens)
    sum_token_sq += len(tokens) * len(tokens)
    sum_char = sum(len(t) for t in tokens)
    sum_char_count += sum_char
    sum_char_sq += sum_char * sum_char

output_folder = sys.argv[1]
Path(output_folder).mkdir(parents=True, exist_ok=True)

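# Open every shard up front: records are appended to a randomly chosen shard
# as they stream in, so all files stay open for the duration of the run.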
for i in range(__FILE_COUNT__):
    fn = f"jomleh_{i+1}.jsonl"
    file_names.append(fn)
    file_pointers[fn] = open(Path(output_folder) / fn, "w", encoding="utf-8")
    record_counter[fn] = 0

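# The input stream alternates <doc ...> headers, one token per line, and
# </doc> terminators; the tokens between the markers form one sentence.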
seen = set()
tokens = []
for token in sys.stdin:
    token = token.strip()
    if token.startswith("<doc"):
        # A new document starts: reset the token buffer and tally its source.
        tokens = []
        doc_id = doc_regex.match(token).group(1)
        ds_name = dataset_names.get(doc_id, doc_id)
        source_dist[ds_name] = source_dist.get(ds_name, 0) + 1
        continue
    if token == "</doc>":
        sentence = " ".join(tokens)
        # Sentences of at least 10 tokens bypass the exact-duplicate check
        # (presumably to keep `seen` from growing unbounded); shorter ones
        # are emitted only the first time they appear.
        if len(tokens) >= 10 or sentence not in seen:
            if len(tokens) < 10:
                seen.add(sentence)
            stats(tokens)
            jsonl = json.dumps({"source": ds_name, "text": sentence}, ensure_ascii=False)
            fn = random.choice(file_names)
            file_pointers[fn].write(jsonl + "\n")
            record_counter[fn] += 1
        continue
    tokens.append(token)

# Flush and close every shard before reporting.
for fp in file_pointers.values():
    fp.close()

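# Aggregate the running sums into averages and standard deviations.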
avg_tokens = sum_token_count / line_counter
stddev_tokens = math.sqrt((sum_token_sq / line_counter) - avg_tokens * avg_tokens)
avg_char = sum_char_count / sum_token_count
stddev_chars = math.sqrt((sum_char_sq / sum_token_count) - avg_char * avg_char)

results = {
    "Number of records per file": record_counter,
    "Number of samples from each source": source_dist,
    "Number of lines": line_counter,
    "Total number of words": sum_token_count,
    "Average number of tokens per line": avg_tokens,
    "Standard deviation for the number of tokens per line": stddev_tokens,
    "Average number of characters per token": avg_char,
    "Standard deviation for the number of characters per token": stddev_chars,
}

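# Emit the full report as one JSON object on stdout.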
print(json.dumps(results))