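"""Command-line entry point for the RobotHub Inference Server.

Three mutually exclusive modes are supported: the integrated Gradio UI plus
server (default), the standalone AI server (--server-only), and OpenAPI
schema export (--export-openapi). See --help for the full option list.
"""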
import argparse
import logging
import sys

import uvicorn

from inference_server.export_openapi import export_openapi_schema
from inference_server.simple_integrated import launch_simple_integrated_app


def setup_logging(debug: bool = False):
    """Set up logging configuration."""
    level = logging.DEBUG if debug else logging.INFO
    logging.basicConfig(
        level=level,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        handlers=[
            logging.StreamHandler(sys.stdout),
        ],
    )


def launch_server_only(host: str = "0.0.0.0", port: int = 8001, reload: bool = True):
    """Launch only the AI server."""
    print(f"🚀 Starting RobotHub Inference Server on {host}:{port}")
    uvicorn.run(
        "inference_server.main:app",
        host=host,
        port=port,
        reload=reload,
        log_level="info",
    )


def launch_integrated_app(
    host: str = "0.0.0.0", port: int = 7860, share: bool = False, debug: bool = False
):
    """Launch the integrated app (UI + Server)."""
    print(f"🎨 Starting Integrated RobotHub App on {host}:{port}")
    setup_logging(debug)
    launch_simple_integrated_app(host=host, port=port, share=share)


def main():
    """Main CLI entry point."""
    parser = argparse.ArgumentParser(
        description="RobotHub Inference Server CLI",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Launch integrated app (recommended)
  python -m inference_server.cli

  # Launch only the server
  python -m inference_server.cli --server-only

  # Launch with a custom port
  python -m inference_server.cli --port 7861

  # Launch with public sharing (Gradio)
  python -m inference_server.cli --share

  # Export OpenAPI schema
  python -m inference_server.cli --export-openapi

  # Export as YAML
  python -m inference_server.cli --export-openapi --export-format yaml
""",
    )

    # Mode selection (the modes are mutually exclusive)
    mode_group = parser.add_mutually_exclusive_group()
    mode_group.add_argument(
        "--server-only", action="store_true", help="Launch only the AI server"
    )
    mode_group.add_argument(
        "--export-openapi", action="store_true", help="Export OpenAPI schema to file"
    )

    # Server configuration
    parser.add_argument(
        "--server-host", default="0.0.0.0", help="AI server host (default: 0.0.0.0)"
    )
    parser.add_argument(
        "--server-port", type=int, default=8001, help="AI server port (default: 8001)"
    )
    parser.add_argument(
        "--no-reload", action="store_true", help="Disable auto-reload for the server"
    )

    # App configuration
    parser.add_argument(
        "--host", default="0.0.0.0", help="App host (default: 0.0.0.0)"
    )
    parser.add_argument(
        "--port", type=int, default=7860, help="App port (default: 7860)"
    )
    parser.add_argument(
        "--share", action="store_true", help="Create a public Gradio link"
    )

    # General options
    parser.add_argument("--debug", action="store_true", help="Enable debug logging")

    # Export options
    parser.add_argument(
        "--export-format",
        choices=["json", "yaml"],
        default="json",
        help="OpenAPI export format (default: json)",
    )
    parser.add_argument(
        "--export-output",
        help="OpenAPI export output file (default: openapi.json or openapi.yaml)",
    )

    args = parser.parse_args()

    try:
        # Route to the appropriate mode
        if args.server_only:
            launch_server_only(
                host=args.server_host, port=args.server_port, reload=not args.no_reload
            )
        elif args.export_openapi:
            # Export the OpenAPI schema instead of launching anything
            output_file = args.export_output
            if output_file is None:
                output_file = f"openapi.{args.export_format}"
            print(f"📄 Exporting OpenAPI schema to {output_file}")
            export_openapi_schema(
                output_file=output_file, format_type=args.export_format
            )
        else:
            # Launch integrated app (default)
            print("🚀 Launching integrated RobotHub Inference Server + UI")
            launch_integrated_app(
                host=args.host, port=args.port, share=args.share, debug=args.debug
            )
    except KeyboardInterrupt:
        print("\n🛑 Stopped by user")
        sys.exit(0)
    except Exception as e:
        print(f"❌ Error: {e}")
        # Re-raise with the original traceback intact
        raise


if __name__ == "__main__":
    main()
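
# Programmatic usage sketch, beyond the CLI examples in the --help epilog
# (assumes the inference_server package is installed and importable):
#
#   from inference_server.cli import launch_server_only
#   launch_server_only(host="127.0.0.1", port=8001, reload=False)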