Upload reupload_dataset.py
reupload_dataset.py
ADDED
@@ -0,0 +1,248 @@
from huggingface_hub import login, hf_hub_download, list_repo_files
from datasets import Dataset, Audio, DatasetDict
import json
import shutil
import os
import sys
import readchar
import re
import subprocess
import traceback
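
# Dependencies (assumed): pip install huggingface_hub datasets readchar
# plus a 7z binary on PATH (7-Zip on Windows, p7zip on Linux).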

# This script has been designed to run on Windows, but *should* also run on Linux.

# Configuration - fill these out
SOURCE_REPO_ID = "OOPPEENN/56697375616C4E6F76656C5F44617461736574"
SOURCE_REPO_SUBFOLDER = "GalGame"

HF_READ_TOKEN = ""   # token with read access to the source repo
HF_WRITE_TOKEN = ""  # token with write access to the target repo

TARGET_REPO_ID = "username/dataset-name"  # dataset the original data will be uploaded to

ARCHIVE_PASSWORD = ""  # password for the 7z archives

STATUS_FILE = "upload_status.json"  # filename to track upload progress for the 'continue' option

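# Tip: for anything beyond a one-off run, reading the tokens from the environment
# avoids hardcoding secrets, e.g.:
#   HF_READ_TOKEN = os.environ.get("HF_READ_TOKEN", "")
#   HF_WRITE_TOKEN = os.environ.get("HF_WRITE_TOKEN", "")
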
def display_menu(selected_index, options):
    os.system('cls' if os.name == 'nt' else 'clear')
    print("="*50)
    print(" Dataset reuploader")
    print("="*50)
    print("Use the up/down arrow keys to navigate, Enter to select\n")

    for i, option in enumerate(options):
        if i == selected_index:
            print(f" > {option}")
        else:
            print(f"   {option}")

    print("\n" + "="*50)

def download_process_and_upload(archive_filename):
    # Assumes 7-Zip is installed and the 7z utility is on the PATH

    downloaded_path = None
    extract_dir = None
    try:
        # Download the archive from the source repo. With local_dir=".",
        # hf_hub_download mirrors the repo's folder layout locally and
        # returns the path to the downloaded file.
        print(f"-> Downloading '{archive_filename}' from {SOURCE_REPO_ID}...")
        downloaded_path = hf_hub_download(
            repo_id=SOURCE_REPO_ID, filename=archive_filename, repo_type="dataset",
            subfolder=SOURCE_REPO_SUBFOLDER, token=HF_READ_TOKEN, local_dir="."
        )
        print(f"   Successfully downloaded to: {downloaded_path}")

        # Prepare a working directory for the new data
        archive_name_without_ext = os.path.splitext(os.path.basename(archive_filename))[0]
        sanitized_config_name = re.sub(r'[\s\W]+', '_', archive_name_without_ext).strip('_')
        extract_dir = f"./{sanitized_config_name}_extracted"
        if os.path.exists(extract_dir):
            shutil.rmtree(extract_dir)
        os.makedirs(extract_dir)

        # Extract the archive
        print(f"\n-> Extracting {archive_filename} using 7zip...")

        command = [
            '7z', 'x', downloaded_path,  # x = extract with full paths
            f'-o{extract_dir}',          # output directory (no space after -o)
            f'-p{ARCHIVE_PASSWORD}',     # archive password (no space after -p)
            '-y'                         # assume Yes on all prompts
        ]

        subprocess.run(command, check=True, capture_output=True, text=True)
        print("   Extraction complete.")

        print("-> Searching for index.json...")
        index_path, base_dir = None, None
        for root, _, files in os.walk(extract_dir):
            if 'index.json' in files:
                index_path, base_dir = os.path.join(root, 'index.json'), root
                break

        if not index_path:
            raise FileNotFoundError("Could not find index.json in extracted files.")

        print("-> Loading index.json...")
        with open(index_path, 'r', encoding='utf-8') as f:
            metadata = json.load(f)

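        # The index.json schema is inferred from the handling below: each item is
        # assumed to carry a "Text" field plus either a "FilePath" (path to the
        # audio, relative to index.json) or a "Speaker"/"Voice" pair, e.g.:
        #   {"Text": "...", "FilePath": "Speaker\\voice_001.ogg"}
        #   {"Text": "...", "Speaker": "Name", "Voice": "voice_001"}
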
        print("-> Creating new dataset")
        dataset_entries = []
        for item in metadata:
            text = item.get("Text")
            if not text:
                continue
            audio_path, voice_id = None, None
            if "FilePath" in item:
                relative_path = item["FilePath"].replace('\\', os.path.sep)
                audio_path = os.path.join(base_dir, relative_path)
                voice_id = os.path.splitext(os.path.basename(relative_path))[0]
            else:
                speaker, temp_voice_id = item.get("Speaker"), item.get("Voice")
                if speaker and temp_voice_id:
                    audio_path = os.path.join(base_dir, speaker.strip(), f"{temp_voice_id.strip()}.ogg")
                    voice_id = temp_voice_id.strip()
            if audio_path and voice_id and os.path.exists(audio_path):
                dataset_entries.append({"audio": audio_path, "text": text, "audio_ID": f"{voice_id}__{sanitized_config_name}"})

        if not dataset_entries:
            print(f"   [WARNING] No valid audio files were found for {archive_filename}. Skipping upload.")
            return True

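        # Note: casting the column to Audio(sampling_rate=48000) makes the
        # `datasets` library decode and resample every clip to 48 kHz on access;
        # the 48 kHz target is taken from the original script, not a requirement.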
        new_dataset = Dataset.from_list(dataset_entries).cast_column("audio", Audio(sampling_rate=48000))
        print(f"   Processed {len(new_dataset)} valid entries.")

        new_dataset_dict = DatasetDict({"train": new_dataset})

        print(f"\n-> Pushing {len(new_dataset)} new rows to the hub as configuration '{sanitized_config_name}'...")
        new_dataset_dict.push_to_hub(
            repo_id=TARGET_REPO_ID,
            config_name=sanitized_config_name,
            token=HF_WRITE_TOKEN
        )

        print(f"\n##### Success! '{archive_filename}' uploaded as a new configuration #####")
        return True

    except subprocess.CalledProcessError as e:
        print(f"\n[ERROR] 7-Zip failed to extract {archive_filename}.")
        print(f"   Return Code: {e.returncode}")
        print(f"   Output: {e.stdout}")
        print(f"   Error Output: {e.stderr}")
        return False
    except Exception as e:
        print(f"\n[ERROR] An error occurred while processing {archive_filename}: {e}")
        traceback.print_exc()
        return False
    finally:
        print("-> Cleaning up files...")
        if downloaded_path and os.path.exists(downloaded_path):
            os.remove(downloaded_path)
            # hf_hub_download mirrors the repo's folder layout under local_dir,
            # so try to remove the now-empty folders it created.
            containing_folder = os.path.dirname(downloaded_path)
            if os.path.basename(containing_folder) == SOURCE_REPO_SUBFOLDER and not os.listdir(containing_folder):
                try:
                    os.rmdir(containing_folder)
                    parent_folder = os.path.dirname(containing_folder)
                    if os.path.basename(parent_folder) == SOURCE_REPO_ID.split('/')[1] and not os.listdir(parent_folder):
                        os.rmdir(parent_folder)
                except OSError:
                    pass
        if extract_dir and os.path.exists(extract_dir):
            shutil.rmtree(extract_dir)
        print("   Cleanup complete.")

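# Note: each archive becomes its own configuration (subset) of the target dataset,
# so a single archive can later be loaded on its own, e.g. (hypothetical config name):
#   from datasets import load_dataset
#   ds = load_dataset(TARGET_REPO_ID, "Some_Archive_Name", split="train")
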
def reupload_single_archive():
    os.system('cls' if os.name == 'nt' else 'clear')
    print("="*50)
    print(" Reupload Single Archive")
    print("="*50)

    filename = input("Enter the filename of the .7z archive to process:\n> ")
    if not filename.endswith('.7z'):
        print("\nInvalid filename. Please provide the full name, ending in .7z")
        return

    download_process_and_upload(filename)

+
def continue_uploading_archives():
|
170 |
+
os.system('cls' if os.name == 'nt' else 'clear')
|
171 |
+
print("="*50); print(" Continue Uploading Archives"); print("="*50)
|
172 |
+
|
173 |
+
# Get the full list of archives from hub
|
174 |
+
print("\n-> Fetching file list from source repo...")
|
175 |
+
try:
|
176 |
+
all_repo_files = list_repo_files(SOURCE_REPO_ID, repo_type="dataset", token=HF_READ_TOKEN)
|
177 |
+
archive_filenames = sorted([os.path.basename(f) for f in all_repo_files if f.startswith(f"{SOURCE_REPO_SUBFOLDER}/") and f.endswith('.7z')])
|
178 |
+
if not archive_filenames:
|
179 |
+
print("No .7z files found in the repo. Nothing to do.")
|
180 |
+
return
|
181 |
+
print(f" Found {len(archive_filenames)} total archives.")
|
182 |
+
except Exception as e:
|
183 |
+
print(f"[ERROR] Could not fetch file list: {e}")
|
184 |
+
return
|
185 |
+
|
186 |
+
# load/create the status file
|
187 |
+
status_data = {}
|
188 |
+
if os.path.exists(STATUS_FILE):
|
189 |
+
print(f"-> Found existing status file '{STATUS_FILE}'. Loading progress")
|
190 |
+
with open(STATUS_FILE, 'r') as f:
|
191 |
+
status_data = json.load(f)
|
192 |
+
else:
|
193 |
+
print(f"-> No status file found. Creating '{STATUS_FILE}'.")
|
194 |
+
|
195 |
+
# create list of files to process
|
196 |
+
files_to_process = [f for f in archive_filenames if not status_data.get(f, False)]
|
197 |
+
total_to_process = len(files_to_process)
|
198 |
+
if not files_to_process:
|
199 |
+
print("\nAll archives have already been processed. Nothing to do!")
|
200 |
+
return
|
201 |
+
|
202 |
+
print(f" {total_to_process} archives remaining to be processed.")
|
203 |
+
|
204 |
+
# Loop through files and process
|
205 |
+
for i, filename in enumerate(files_to_process):
|
206 |
+
print("\n" + "="*50)
|
207 |
+
print(f"(Processing archive {i+1} / {total_to_process})")
|
208 |
+
print("="*50)
|
209 |
+
|
210 |
+
success = download_process_and_upload(filename)
|
211 |
+
|
212 |
+
if success:
|
213 |
+
status_data[filename] = True
|
214 |
+
with open(STATUS_FILE, 'w') as f:
|
215 |
+
json.dump(status_data, f, indent=4)
|
216 |
+
print(f" Updated status for '{filename}' to complete.")
|
217 |
+
else:
|
218 |
+
print(f"\n[WARNING!] Processing failed for '{filename}'")
|
219 |
+
return # stop process on failure
|
220 |
+
|
221 |
+
print("\n\n" + "="*50)
|
222 |
+
print(" ALL ARCHIVES PROCESSED SUCCESSFULLY!")
|
223 |
+
print("="*50)
|
224 |
+
|
225 |
+
def main():
|
226 |
+
print("Logging into huggingface hub with write access...")
|
227 |
+
|
228 |
+
login(token=HF_WRITE_TOKEN, add_to_git_credential=True)
|
229 |
+
|
230 |
+
selected_index = 0
|
231 |
+
menu_options = ["Reupload single archive", "Continue uploading archives", "Exit"]
|
232 |
+
while True:
|
233 |
+
display_menu(selected_index, menu_options)
|
234 |
+
key = readchar.readkey()
|
235 |
+
|
236 |
+
if key == readchar.key.UP: selected_index = (selected_index - 1) % len(menu_options)
|
237 |
+
|
238 |
+
elif key == readchar.key.DOWN: selected_index = (selected_index + 1) % len(menu_options)
|
239 |
+
elif key == readchar.key.ENTER:
|
240 |
+
if selected_index == 0: reupload_single_archive()
|
241 |
+
elif selected_index == 1: continue_uploading_archives()
|
242 |
+
elif selected_index == 2: print("Exiting."); sys.exit(0)
|
243 |
+
input("\nPress Enter to return to the main menu")
|
244 |
+
elif key in (readchar.key.CTRL_C, readchar.key.CTRL_D):
|
245 |
+
print("Exiting... Goodbye."); sys.exit(0)
|
246 |
+
|
247 |
+
if __name__ == "__main__":
|
248 |
+
main()
|
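
# Usage (a sketch): fill in the configuration block at the top, make sure 7z is
# on PATH, then run:
#   python reupload_dataset.py
# A failed archive stops the 'continue' run; fix the problem and rerun to resume
# from upload_status.json.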