Skip to content

Commit 1a32e42

Browse files
authored
Merge pull request #1142 from roboflow/tony/easydebug
easy debug
2 parents 83e5d85 + 6358b92 commit 1a32e42

1 file changed

Lines changed: 41 additions & 0 deletions

File tree

debugrun.py

Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,41 @@
"""Convenience script to run the inference server in debug mode
(i.e. runs uvicorn directly).

It's a simplified version of docker/config/cpu_http.py

see https://www.loom.com/share/48f71894427a473cac39eca25f6ac759

Setup:
- uv venv
- source .venv/bin/activate
- uv pip install -e .
- # start debugrun.py in debug mode
"""

import sys

import uvicorn

from inference.core.cache import cache
from inference.core.env import MAX_ACTIVE_MODELS
from inference.core.interfaces.http.http_api import HttpInterface
from inference.core.managers.active_learning import BackgroundTaskActiveLearningManager
from inference.core.managers.decorators.fixed_size_cache import WithFixedSizeCache
from inference.core.registries.roboflow import RoboflowModelRegistry
from inference.models.utils import ROBOFLOW_MODEL_TYPES

# Build the model registry, then wrap the active-learning manager in a
# fixed-size cache so at most MAX_ACTIVE_MODELS models stay loaded at once.
model_registry = RoboflowModelRegistry(ROBOFLOW_MODEL_TYPES)
model_manager = BackgroundTaskActiveLearningManager(
    model_registry=model_registry, cache=cache
)
model_manager = WithFixedSizeCache(model_manager, max_size=MAX_ACTIVE_MODELS)
model_manager.init_pingback()

# Expose the ASGI app at module level so it can also be served by an
# external runner (e.g. `uvicorn debugrun:app`).
interface = HttpInterface(model_manager)
app = interface.app

if __name__ == "__main__":
    try:
        uvicorn.run(app, host="127.0.0.1", port=9001)
    except Exception as e:
        # Top-level boundary: report the failure on stderr and exit non-zero
        # so the debugger/shell sees the server did not start.
        print("Error starting server:", e, file=sys.stderr)
        sys.exit(1)

0 commit comments

Comments
 (0)