juyoung-trl committed
Commit b025a99 · verified · 1 Parent(s): 377ca1b

Update README.md

Files changed (1):
  1. README.md +108 -0

README.md CHANGED
@@ -30,3 +30,111 @@ configs:
   - split: ja
     path: data/ja-*
 ---
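The script below builds this dataset: it streams Korean, Chinese, and Japanese text from FineWeb-2 and English text from FineWeb-Edu, sorts the sampled documents by CommonCrawl dump date (most recent first), deduplicates them, keeps up to 15,000 texts longer than 50 characters per language, and uploads the result to the Hugging Face Hub.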
```python
from datasets import load_dataset, Dataset
from itertools import islice
import re


def extract_dump_date(dump_str):
    """Extract the date from a dump name like 'CC-MAIN-2024-10' -> '2024-10'."""
    if not dump_str:
        return '0'  # Sorts last for items without a dump field
    match = re.search(r'(\d{4}-\d{2})', dump_str)
    return match.group(1) if match else '0'


def load_and_verify_datasets():
    # Load the source corpora in streaming mode (no full download required)
    datasets = {
        "ko": load_dataset("HuggingFaceFW/fineweb-2", "kor_Hang", split="test", streaming=True),
        "zh": load_dataset("HuggingFaceFW/fineweb-2", "cmn_Hani", split="test", streaming=True),
        "en": load_dataset("HuggingFaceFW/fineweb-edu", "CC-MAIN-2024-10", split="train", streaming=True),
        "ja": load_dataset("HuggingFaceFW/fineweb-2", "jpn_Jpan", split="test", streaming=True),
    }

    processed_datasets = {}

    for lang, ds in datasets.items():
        print(f"\nProcessing {lang} dataset...")

        # Collect the first 100K items together with their dump names
        items_with_dumps = []
        for item in islice(ds, 100_000):
            dump = item.get('dump', '')
            items_with_dumps.append((item, dump))

        # Sort by dump date in descending order (most recent first)
        items_with_dumps.sort(key=lambda x: extract_dump_date(x[1]), reverse=True)

        # Print the dump distribution of the first 1,000 sorted items
        print("\nDump distribution (most recent first):")
        dump_counts = {}
        for _, dump in items_with_dumps[:1000]:
            dump_counts[dump] = dump_counts.get(dump, 0) + 1
        for dump, count in sorted(dump_counts.items(), key=lambda x: extract_dump_date(x[0]), reverse=True):
            print(f"  {dump}: {count} items")

        # Deduplicate via a set, keeping only non-empty texts longer than 50 characters
        texts_set = set()
        for item, dump in items_with_dumps:
            text = item.get('text', item.get('content', '')).strip()
            if text and len(text) > 50:
                texts_set.add(text)
            if len(texts_set) >= 15000:
                break

        # Convert the set to a list and build the per-language dataset
        filtered_texts = list(texts_set)
        processed_datasets[lang] = Dataset.from_dict({"text": filtered_texts})
        print(f"\n{lang} dataset final size: {len(processed_datasets[lang])} examples")

        # Show two sample texts from the most recent dumps
        print("\nSample texts from most recent dumps:")
        samples_shown = 0
        for item, dump in items_with_dumps:
            if samples_shown >= 2:
                break
            text = item.get('text', item.get('content', '')).strip()
            if text in texts_set:
                print(f"Dump: {dump}")
                print(f"Length: {len(text)}")
                print(f"Text preview: {text[:100]}...")
                print("---")
                samples_shown += 1

    return processed_datasets


def main():
    try:
        datasets = load_and_verify_datasets()
        print("\nDatasets processed with the following sizes:")
        for lang, ds in datasets.items():
            print(f"{lang}: {len(ds)} examples")

        # Combine the per-language datasets into a single DatasetDict
        print("\nCreating DatasetDict...")
        dataset_dict = create_dataset_dict(datasets)

        # Upload to the Hugging Face Hub -- replace with your own values
        REPO_NAME = "yourname/reponame"
        HF_TOKEN = "your_api_key"  # Don't share this token!

        print("\nUploading to Hugging Face Hub...")
        upload_to_huggingface(dataset_dict, REPO_NAME, HF_TOKEN)
        print(f"\nDataset uploaded successfully to {REPO_NAME}")

    except Exception as e:
        print(f"Error processing datasets: {e}")


if __name__ == "__main__":
    main()
```
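`main()` calls `create_dataset_dict` and `upload_to_huggingface`, which the snippet never defines. A minimal sketch of the two helpers, assuming the standard `datasets` APIs (`DatasetDict` and `push_to_hub`); the names match the calls above, but the bodies are an assumption, not the author's code:

```python
from datasets import Dataset, DatasetDict


def create_dataset_dict(datasets_by_lang):
    # Hypothetical helper: wrap the per-language datasets in a DatasetDict,
    # so each language code ("ko", "zh", "en", "ja") becomes a split.
    return DatasetDict(datasets_by_lang)


def upload_to_huggingface(dataset_dict, repo_name, token):
    # Hypothetical helper: push all splits to the Hub in one call;
    # push_to_hub creates the repository if it does not already exist.
    dataset_dict.push_to_hub(repo_name, token=token)
```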