/**
* LeRobot Arena Inference Server TypeScript Client
*
* This client provides TypeScript access to the LeRobot Arena Inference Server
* for ACT (Action Chunking Transformer) model inference and session management.
*
* @example Basic Usage
* ```typescript
* import { LeRobotInferenceServerClient, CreateSessionRequest } from '@lerobot-arena/inference-server-client';
*
* const client = new LeRobotInferenceServerClient('http://localhost:8001');
*
* // Create and start a session
* const sessionRequest: CreateSessionRequest = {
* session_id: 'my-robot-01',
* policy_path: './checkpoints/act_so101_beyond',
* camera_names: ['front', 'wrist'],
* arena_server_url: 'http://localhost:8000'
* };
*
* const session = await client.createSession(sessionRequest);
* await client.startInference('my-robot-01');
*
* // Monitor session
* const status = await client.getSessionStatus('my-robot-01');
* console.log(`Session status: ${status.status}`);
* ```
*/
// Export all generated types and services from OpenAPI
export * from './generated';
// Import what we need for the convenience wrapper
import {
client,
createSessionSessionsPost,
listSessionsSessionsGet,
getSessionStatusSessionsSessionIdGet,
startInferenceSessionsSessionIdStartPost,
stopInferenceSessionsSessionIdStopPost,
restartInferenceSessionsSessionIdRestartPost,
deleteSessionSessionsSessionIdDelete,
healthCheckHealthGet,
getSystemInfoDebugSystemGet,
debugResetSessionDebugSessionsSessionIdResetPost,
getSessionQueueInfoDebugSessionsSessionIdQueueGet
} from './generated';
import type {
CreateSessionRequest,
CreateSessionResponse,
SessionStatusResponse
} from './generated';
/**
* LeRobot Arena Inference Server Client
*
* A convenience wrapper around the generated OpenAPI client that provides
* a simpler interface for common operations while maintaining full type safety.
*/
export class LeRobotInferenceServerClient {
  // Kept for introspection/debugging; the generated client holds the active config.
  private readonly baseUrl: string;

  /**
   * @param baseUrl - Root URL of the inference server, e.g. `http://localhost:8001`.
   */
  constructor(baseUrl: string) {
    this.baseUrl = baseUrl;
    // Configure the shared generated client with the base URL
    client.setConfig({ baseUrl });
  }

  /**
   * Throw a descriptive Error when a generated-client response carries an error payload.
   *
   * @param error - The `error` field of a generated-client response.
   * @param message - Human-readable prefix describing the failed operation.
   * @throws Error when `error` is truthy.
   */
  private static ensureOk(error: unknown, message: string): void {
    if (error) {
      throw new Error(`${message}: ${JSON.stringify(error)}`);
    }
  }

  /**
   * Check if the inference server is healthy and responding.
   *
   * Never throws: network failures and error responses both yield `false`.
   */
  async isHealthy(): Promise<boolean> {
    try {
      const response = await healthCheckHealthGet();
      return !response.error;
    } catch {
      return false;
    }
  }

  /**
   * Get detailed server health information.
   *
   * @throws Error when the health endpoint reports a failure.
   */
  async getHealth() {
    const response = await healthCheckHealthGet();
    LeRobotInferenceServerClient.ensureOk(response.error, 'Health check failed');
    return response.data;
  }

  /**
   * Create a new inference session.
   *
   * @throws Error when the server rejects the request or returns no body.
   */
  async createSession(request: CreateSessionRequest): Promise<CreateSessionResponse> {
    const response = await createSessionSessionsPost({
      body: request
    });
    LeRobotInferenceServerClient.ensureOk(response.error, 'Failed to create session');
    if (!response.data) {
      // Defensive: previously `data!` would silently pass `undefined` through here.
      throw new Error('Failed to create session: empty response body');
    }
    return response.data;
  }

  /**
   * List all active sessions.
   *
   * @throws Error when the server reports a failure or returns no body.
   */
  async listSessions(): Promise<SessionStatusResponse[]> {
    const response = await listSessionsSessionsGet();
    LeRobotInferenceServerClient.ensureOk(response.error, 'Failed to list sessions');
    if (!response.data) {
      throw new Error('Failed to list sessions: empty response body');
    }
    return response.data;
  }

  /**
   * Get detailed status of a specific session.
   *
   * @throws Error when the server reports a failure or returns no body.
   */
  async getSessionStatus(sessionId: string): Promise<SessionStatusResponse> {
    const response = await getSessionStatusSessionsSessionIdGet({
      path: { session_id: sessionId }
    });
    LeRobotInferenceServerClient.ensureOk(response.error, 'Failed to get session status');
    if (!response.data) {
      throw new Error('Failed to get session status: empty response body');
    }
    return response.data;
  }

  /**
   * Start inference for a session.
   *
   * @throws Error when the server reports a failure.
   */
  async startInference(sessionId: string): Promise<void> {
    const response = await startInferenceSessionsSessionIdStartPost({
      path: { session_id: sessionId }
    });
    LeRobotInferenceServerClient.ensureOk(response.error, 'Failed to start inference');
  }

  /**
   * Stop inference for a session.
   *
   * @throws Error when the server reports a failure.
   */
  async stopInference(sessionId: string): Promise<void> {
    const response = await stopInferenceSessionsSessionIdStopPost({
      path: { session_id: sessionId }
    });
    LeRobotInferenceServerClient.ensureOk(response.error, 'Failed to stop inference');
  }

  /**
   * Restart inference for a session.
   *
   * @throws Error when the server reports a failure.
   */
  async restartInference(sessionId: string): Promise<void> {
    const response = await restartInferenceSessionsSessionIdRestartPost({
      path: { session_id: sessionId }
    });
    LeRobotInferenceServerClient.ensureOk(response.error, 'Failed to restart inference');
  }

  /**
   * Delete a session and clean up all resources.
   *
   * @throws Error when the server reports a failure.
   */
  async deleteSession(sessionId: string): Promise<void> {
    const response = await deleteSessionSessionsSessionIdDelete({
      path: { session_id: sessionId }
    });
    LeRobotInferenceServerClient.ensureOk(response.error, 'Failed to delete session');
  }

  /**
   * Poll until a session reaches a specific status, or time out.
   *
   * @param sessionId - Session to poll.
   * @param targetStatus - Status string to wait for (compared with `===`).
   * @param timeoutMs - Maximum time to wait, default 30 s.
   * @returns The matching status response.
   * @throws Error when the deadline passes without the status being reached.
   */
  async waitForSessionStatus(
    sessionId: string,
    targetStatus: string,
    timeoutMs: number = 30000
  ): Promise<SessionStatusResponse> {
    const pollIntervalMs = 1000;
    const startTime = Date.now();
    while (Date.now() - startTime < timeoutMs) {
      const status = await this.getSessionStatus(sessionId);
      if (status.status === targetStatus) {
        return status;
      }
      // Sleep no longer than the time remaining so we don't overshoot the deadline.
      const remaining = timeoutMs - (Date.now() - startTime);
      await new Promise(resolve => setTimeout(resolve, Math.min(pollIntervalMs, remaining)));
    }
    throw new Error(`Timeout waiting for session ${sessionId} to reach status ${targetStatus}`);
  }

  /**
   * Convenience method: create a session, start inference, and wait for it
   * to reach the `'running'` status.
   *
   * @throws Error when any of the three steps fails or the wait times out.
   */
  async createAndStartSession(request: CreateSessionRequest): Promise<{
    session: CreateSessionResponse;
    status: SessionStatusResponse;
  }> {
    const session = await this.createSession(request);
    await this.startInference(request.session_id);
    // Wait for it to be running
    const status = await this.waitForSessionStatus(request.session_id, 'running');
    return { session, status };
  }

  /**
   * Get system information for debugging.
   *
   * @throws Error when the server reports a failure.
   */
  async getSystemInfo() {
    const response = await getSystemInfoDebugSystemGet();
    LeRobotInferenceServerClient.ensureOk(response.error, 'Failed to get system info');
    return response.data;
  }

  /**
   * Reset a session's internal state (debug method).
   *
   * @throws Error when the server reports a failure.
   */
  async debugResetSession(sessionId: string): Promise<void> {
    const response = await debugResetSessionDebugSessionsSessionIdResetPost({
      path: { session_id: sessionId }
    });
    LeRobotInferenceServerClient.ensureOk(response.error, 'Failed to reset session');
  }

  /**
   * Get detailed information about a session's action queue (debug method).
   *
   * @throws Error when the server reports a failure.
   */
  async getSessionQueueInfo(sessionId: string) {
    const response = await getSessionQueueInfoDebugSessionsSessionIdQueueGet({
      path: { session_id: sessionId }
    });
    LeRobotInferenceServerClient.ensureOk(response.error, 'Failed to get queue info');
    return response.data;
  }
}
/**
 * Factory helper for building a client instance.
 *
 * @param baseUrl - Root URL of the inference server.
 * @returns A configured {@link LeRobotInferenceServerClient}.
 */
export function createClient(baseUrl: string): LeRobotInferenceServerClient {
  const instance = new LeRobotInferenceServerClient(baseUrl);
  return instance;
}
// Export the old class name for backward compatibility
export const LeRobotAIServerClient = LeRobotInferenceServerClient; |