File tree Expand file tree Collapse file tree
"""Convenience script to run the inference server in debug mode.

Runs uvicorn directly (instead of going through Docker) so the server can
be started under a debugger. It is a simplified version of
docker/config/cpu_http.py.

See https://www.loom.com/share/48f71894427a473cac39eca25f6ac759

Setup:
    - uv venv
    - source .venv/bin/activate
    - uv pip install -e .
    - start debugrun.py in debug mode
"""

import sys

import uvicorn

from inference.core.cache import cache
from inference.core.env import MAX_ACTIVE_MODELS
from inference.core.interfaces.http.http_api import HttpInterface
from inference.core.managers.active_learning import BackgroundTaskActiveLearningManager
from inference.core.managers.decorators.fixed_size_cache import WithFixedSizeCache
from inference.core.registries.roboflow import RoboflowModelRegistry
from inference.models.utils import ROBOFLOW_MODEL_TYPES

# Registry mapping Roboflow model-type identifiers to model implementations.
model_registry = RoboflowModelRegistry(ROBOFLOW_MODEL_TYPES)

# Manager that performs active-learning bookkeeping via background tasks.
model_manager = BackgroundTaskActiveLearningManager(
    model_registry=model_registry, cache=cache
)

# Cap the number of concurrently loaded models; least-recently-used models
# are evicted once MAX_ACTIVE_MODELS is reached.
model_manager = WithFixedSizeCache(model_manager, max_size=MAX_ACTIVE_MODELS)
model_manager.init_pingback()

interface = HttpInterface(model_manager)
# ASGI application served by uvicorn (also importable as `debugrun:app`).
app = interface.app

if __name__ == "__main__":
    try:
        uvicorn.run(app, host="127.0.0.1", port=9001)
    except Exception as e:  # top-level boundary: report and exit non-zero
        # Diagnostics belong on stderr, not stdout, when exiting with failure.
        print("Error starting server:", e, file=sys.stderr)
        sys.exit(1)
You can’t perform that action at this time.
0 commit comments