blanchon committed
Commit d0903bf · 1 Parent(s): e3c6edf
Dockerfile CHANGED
@@ -12,7 +12,8 @@ ENV PYTHONUNBUFFERED=1 \
     UV_COMPILE_BYTECODE=1 \
     UV_CACHE_DIR=/tmp/uv-cache \
     PORT=${PORT} \
-    TRANSPORT_SERVER_URL=${TRANSPORT_SERVER_URL}
+    TRANSPORT_SERVER_URL=${TRANSPORT_SERVER_URL} \
+    HF_HOME=/app/.cache
 
 # Install system dependencies
 RUN apt-get update && apt-get install -y \
@@ -58,6 +59,10 @@ COPY --chown=appuser:appuser . .
 RUN --mount=type=cache,target=/tmp/uv-cache \
     uv sync --locked --no-editable --no-dev
 
+# Create cache directories for Hugging Face with proper ownership
+RUN mkdir -p /app/.cache && \
+    chown -R appuser:appuser /app/.cache
+
 # Switch to non-root user
 USER appuser
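The new HF_HOME value redirects the Hugging Face cache to /app/.cache, and the second hunk pre-creates that directory and hands ownership to appuser so model downloads still work after the container drops root. A minimal sanity check, assuming huggingface_hub is installed in the image (the script name and the imported constant come from that library, not from this commit):

# check_hf_cache.py - illustrative only; assumes huggingface_hub is available in the image.
import os

from huggingface_hub.constants import HF_HOME  # resolved from the HF_HOME env var at import time

print("HF_HOME env:", os.environ.get("HF_HOME"))                  # expected: /app/.cache
print("huggingface_hub cache root:", HF_HOME)                     # should resolve under /app/.cache
print("writable by current user:", os.access(HF_HOME, os.W_OK))   # True once the chown has run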
client/examples/basic-usage.ts CHANGED
@@ -1,6 +1,6 @@
 #!/usr/bin/env bun
 /**
- * Basic Usage Example for LeRobot Arena Inference Server TypeScript Client
+ * Basic Usage Example for RobotHub Inference Server TypeScript Client
  *
  * This example demonstrates how to:
  * 1. Create a client instance
@@ -234,7 +234,7 @@ async function quickExample() {
 
 // Run the main example
 if (import.meta.main) {
-  console.log('=== LeRobot Arena Inference Server Client Example ===\n');
+  console.log('=== RobotHub Inference Server Client Example ===\n');
 
   // Choose which example to run based on command line argument
   const runQuick = process.argv.includes('--quick');
src/inference_server/export_openapi.py CHANGED
@@ -7,7 +7,6 @@ from typing import Any
 import yaml
 from fastapi.openapi.utils import get_openapi
 
-
 from inference_server.main import app
 
 
@@ -29,7 +28,7 @@ def create_custom_openapi_schema(app) -> dict[str, Any]:
         {"name": "Health", "description": "Health check and server status endpoints"},
         {
             "name": "Sessions",
-            "description": "Inference session management - create, control, and monitor AI sessions",
+            "description": "Inference session management - create, control, and monitor Inference Sessions",
         },
         {
             "name": "Control",
@@ -171,6 +170,7 @@ Examples:
     if args.validate:
         try:
             from openapi_spec_validator import validate_spec
+
             validate_spec(schema)
             print("✅ Schema validation passed")
         except ImportError:
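For orientation, the validation path this last hunk touches follows the standard FastAPI export pattern; the sketch below is a simplified reading of export_openapi.py based only on what appears in the diff (the output path, titles, and argument handling in the real script may differ):

import yaml
from fastapi.openapi.utils import get_openapi

from inference_server.main import app

# Build the OpenAPI schema straight from the FastAPI app's routes.
schema = get_openapi(title=app.title, version=app.version, routes=app.routes)

# Persist it as YAML (file name is illustrative).
with open("openapi.yaml", "w") as f:
    yaml.safe_dump(schema, f, sort_keys=False)

# Validation stays optional: openapi_spec_validator is only imported on demand,
# which is the try/except ImportError block shown above.
try:
    from openapi_spec_validator import validate_spec

    validate_spec(schema)
    print("✅ Schema validation passed")
except ImportError:
    print("openapi_spec_validator not installed; skipping validation")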