Spaces:
Sleeping
Sleeping
tangxuemei
committed on
- src/envs.py +3 -3
src/envs.py
CHANGED
@@ -4,7 +4,7 @@ from huggingface_hub import HfApi
|
|
4 |
|
5 |
|
6 |
# replace this with our token
|
7 |
-
TOKEN=os.environ.get("HF_TOKEN", None)
|
8 |
# print(TOKEN)
|
9 |
# OWNER = "vectara"
|
10 |
# REPO_ID = f"{OWNER}/Humanlike"
|
@@ -19,7 +19,7 @@ REPO_ID = f"{OWNER}/Humanlike"
|
|
19 |
QUEUE_REPO = f"{OWNER}/requests"
|
20 |
RESULTS_REPO = f"{OWNER}/results"
|
21 |
|
22 |
-
print(RESULTS_REPO)
|
23 |
CACHE_PATH=os.getenv("HF_HOME", ".")
|
24 |
|
25 |
# Local caches
|
@@ -27,7 +27,7 @@ EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
|
|
27 |
EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
|
28 |
EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
|
29 |
EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
|
30 |
-
print(
|
31 |
# exit()
|
32 |
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu') #"cpu"
|
33 |
API = HfApi(token=TOKEN)
|
|
|
4 |
|
5 |
|
6 |
# replace this with our token
|
7 |
+
TOKEN = os.environ.get("HF_TOKEN", None)
|
8 |
# print(TOKEN)
|
9 |
# OWNER = "vectara"
|
10 |
# REPO_ID = f"{OWNER}/Humanlike"
|
|
|
19 |
QUEUE_REPO = f"{OWNER}/requests"
|
20 |
RESULTS_REPO = f"{OWNER}/results"
|
21 |
|
22 |
+
# print(RESULTS_REPO)
|
23 |
CACHE_PATH=os.getenv("HF_HOME", ".")
|
24 |
|
25 |
# Local caches
|
|
|
27 |
EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
|
28 |
EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
|
29 |
EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
|
30 |
+
# print(EVAL_RESULTS_PATH)
|
31 |
# exit()
|
32 |
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu') #"cpu"
|
33 |
API = HfApi(token=TOKEN)
|