tangxuemei committed on
Commit
f91ae8c
·
verified ·
1 Parent(s): 7ab8897
Files changed (1) hide show
  1. main_backend.py +5 -6
main_backend.py CHANGED
@@ -20,12 +20,11 @@ RUNNING_STATUS = "RUNNING"
20
  FINISHED_STATUS = "FINISHED"
21
  FAILED_STATUS = "FAILED"
22
  # import os
23
- # os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
24
- # snapshot_download(repo_id=envs.RESULTS_REPO, revision="main",
25
- # local_dir=envs.EVAL_RESULTS_PATH_BACKEND, repo_type="dataset", max_workers=60)
26
 
27
- # snapshot_download(repo_id=envs.QUEUE_REPO, revision="main",
28
- # local_dir=envs.EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60)
29
  # exit()
30
 
31
  def run_auto_eval(args):
@@ -112,7 +111,7 @@ def main():
112
  parser = argparse.ArgumentParser(description="Run auto evaluation with optional reproducibility feature")
113
 
114
  # Optional arguments
115
- parser.add_argument("--reproduce", type=bool, default=False, help="Reproduce the evaluation results")
116
  parser.add_argument("--model", type=str, default=None, help="Your Model ID")
117
  parser.add_argument("--precision", type=str, default="float16", help="Precision of your model")
118
  parser.add_argument("--publish", type=bool, default=False, help="whether directly publish the evaluation results on HF")
 
20
  FINISHED_STATUS = "FINISHED"
21
  FAILED_STATUS = "FAILED"
22
  # import os
23
+ snapshot_download(repo_id=envs.RESULTS_REPO, revision="main",
24
+ local_dir=envs.EVAL_RESULTS_PATH_BACKEND, repo_type="dataset", max_workers=60)
 
25
 
26
+ snapshot_download(repo_id=envs.QUEUE_REPO, revision="main",
27
+ local_dir=envs.EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60)
28
  # exit()
29
 
30
  def run_auto_eval(args):
 
111
  parser = argparse.ArgumentParser(description="Run auto evaluation with optional reproducibility feature")
112
 
113
  # Optional arguments
114
+ parser.add_argument("--reproduce", type=bool, default=True, help="Reproduce the evaluation results")
115
  parser.add_argument("--model", type=str, default=None, help="Your Model ID")
116
  parser.add_argument("--precision", type=str, default="float16", help="Precision of your model")
117
  parser.add_argument("--publish", type=bool, default=False, help="whether directly publish the evaluation results on HF")