Skip to content

Commit 5675895

Browse files
Merge branch 'master' into deepseek-support
2 parents e2403ca + 1e70643 commit 5675895

9 files changed

Lines changed: 287 additions & 14 deletions

File tree

API.md

Lines changed: 152 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,152 @@
1+
# Bolna API Documentation
2+
3+
## Endpoints
4+
5+
### Get Agent
6+
Retrieves an agent's information by agent id.
7+
8+
**Endpoint:** `GET /agent/{agent_id}`
9+
10+
**Parameters:**
11+
- `agent_id` (path) - string, required: Unique identifier of the agent
12+
13+
### Create Agent
14+
Creates a new agent with specified configuration.
15+
16+
**Endpoint:** `POST /agent`
17+
18+
**Request Body:**
19+
```json
20+
{
    "agent_config": {
        "agent_name": "Alfred",
        "agent_type": "other",
        "agent_welcome_message": "How are you doing Bruce?",
        "tasks": [
            {
                "task_type": "conversation",
                "toolchain": {
                    "execution": "parallel",
                    "pipelines": [
                        [
                            "transcriber",
                            "llm",
                            "synthesizer"
                        ]
                    ]
                },
                "tools_config": {
                    "input": {
                        "format": "wav",
                        "provider": "twilio"
                    },
                    "llm_agent": {
                        "agent_type": "simple_llm_agent",
                        "agent_flow_type": "streaming",
                        "routes": null,
                        "llm_config": {
                            "agent_flow_type": "streaming",
                            "provider": "openai",
                            "request_json": true,
                            "model": "gpt-4o-mini"
                        }
                    },
                    "output": {
                        "format": "wav",
                        "provider": "twilio"
                    },
                    "synthesizer": {
                        "audio_format": "wav",
                        "provider": "elevenlabs",
                        "stream": true,
                        "provider_config": {
                            "voice": "George",
                            "model": "eleven_turbo_v2_5",
                            "voice_id": "JBFqnCBsd6RMkjVDRZzb"
                        },
                        "buffer_size": 100.0
                    },
                    "transcriber": {
                        "encoding": "linear16",
                        "language": "en",
                        "provider": "deepgram",
                        "stream": true
                    }
                },
                "task_config": {
                    "hangup_after_silence": 30.0
                }
            }
        ]
    },
    "agent_prompts": {
        "task_1": {
            "system_prompt": "Why Do We Fall, Sir? So That We Can Learn To Pick Ourselves Up."
        }
    }
}
93+
```
94+
95+
**Response:**
96+
```json
97+
200 OK
98+
{
99+
"agent_id": "uuid-string",
100+
"state": "created"
101+
}
102+
```
103+
104+
### Edit Agent
105+
Updates an existing agent's configuration.
106+
107+
**Endpoint:** `PUT /agent/{agent_id}`
108+
109+
**Parameters:**
110+
- `agent_id` (path) - string, required: Unique identifier of the agent
111+
112+
**Request Body:**
113+
Same as Create Agent endpoint
114+
115+
116+
### Delete Agent
117+
Deletes an agent from the system.
118+
119+
**Endpoint:** `DELETE /agent/{agent_id}`
120+
121+
**Parameters:**
122+
- `agent_id` (path) - string, required: Unique identifier of the agent
123+
124+
**Response:**
125+
```json
126+
200 OK
127+
{
128+
"agent_id": "string",
129+
"state": "deleted"
130+
}
131+
```
132+
133+
134+
### Get All Agents
135+
Retrieves all agents from the system.
136+
137+
**Endpoint:** `GET /all`
138+
139+
**Response:**
140+
```json
141+
200 OK
142+
{
143+
"agents": [
144+
{
145+
"agent_id": "string",
146+
"data": {
147+
// Agent configuration object
148+
}
149+
}
150+
]
151+
}
152+
```

bolna/agent_manager/task_manager.py

Lines changed: 19 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -510,10 +510,12 @@ def __setup_output_handlers(self, turn_based_conversation, output_queue):
510510
def __setup_input_handlers(self, turn_based_conversation, input_queue, should_record):
511511
if self.task_config["tools_config"]["input"]["provider"] in SUPPORTED_INPUT_HANDLERS.keys():
512512
logger.info(f"Connected through dashboard {turn_based_conversation}")
513-
input_kwargs = {"queues": self.queues,
514-
"websocket": self.websocket,
515-
"input_types": get_required_input_types(self.task_config),
516-
"mark_set": self.mark_set}
513+
input_kwargs = {
514+
"queues": self.queues,
515+
"websocket": self.websocket,
516+
"input_types": get_required_input_types(self.task_config),
517+
"mark_set": self.mark_set
518+
}
517519

518520
if self.task_config["tools_config"]["input"]["provider"] == "daily":
519521
input_kwargs['room_url'] = self.room_url
@@ -581,7 +583,8 @@ def __setup_synthesizer(self, llm_config=None):
581583
self.task_config["tools_config"]["synthesizer"]["stream"] = True if self.enforce_streaming else False #Hardcode stream to be False as we don't want to get blocked by a __listen_synthesizer co-routine
582584

583585
self.tools["synthesizer"] = synthesizer_class(**self.task_config["tools_config"]["synthesizer"], **provider_config, **self.kwargs, caching=caching)
584-
self.synthesizer_monitor_task = asyncio.create_task(self.tools['synthesizer'].monitor_connection())
586+
if not self.turn_based_conversation:
587+
self.synthesizer_monitor_task = asyncio.create_task(self.tools['synthesizer'].monitor_connection())
585588
if self.task_config["tools_config"]["llm_agent"] is not None and llm_config is not None:
586589
llm_config["buffer_size"] = self.task_config["tools_config"]["synthesizer"].get('buffer_size')
587590

@@ -928,7 +931,6 @@ def __update_preprocessed_tree_node(self):
928931
# LLM task
929932
##############################################################
930933
async def _handle_llm_output(self, next_step, text_chunk, should_bypass_synth, meta_info, is_filler = False):
931-
932934
logger.info("received text from LLM for output processing: {} which belongs to sequence id {}".format(text_chunk, meta_info['sequence_id']))
933935
if "request_id" not in meta_info:
934936
meta_info["request_id"] = str(uuid.uuid4())
@@ -1192,7 +1194,7 @@ async def _process_conversation_task(self, message, sequence, meta_info):
11921194
logger.info("agent flow is not preprocessed")
11931195

11941196
start_time = time.time()
1195-
should_bypass_synth = 'bypass_synth' in meta_info and meta_info['bypass_synth'] == True
1197+
should_bypass_synth = 'bypass_synth' in meta_info and meta_info['bypass_synth'] is True
11961198
next_step = self._get_next_step(sequence, "llm")
11971199
meta_info['llm_start_time'] = time.time()
11981200
route = None
@@ -1926,7 +1928,15 @@ async def __first_message(self, timeout=10.0):
19261928
text = self.kwargs.get('agent_welcome_message', None)
19271929
logger.info(f"Generating {text}")
19281930
meta_info = {'io': self.tools["output"].get_provider(), 'message_category': 'agent_welcome_message', 'stream_sid': stream_sid, "request_id": str(uuid.uuid4()), "cached": True, "sequence_id": -1, 'format': self.task_config["tools_config"]["output"]["format"], 'text': text}
1929-
await self._synthesize(create_ws_data_packet(text, meta_info=meta_info))
1931+
if self.turn_based_conversation:
1932+
meta_info['type'] = 'text'
1933+
bos_packet = create_ws_data_packet("<beginning_of_stream>", meta_info)
1934+
await self.tools["output"].handle(bos_packet)
1935+
await self.tools["output"].handle(create_ws_data_packet(text, meta_info))
1936+
eos_packet = create_ws_data_packet("<end_of_stream>", meta_info)
1937+
await self.tools["output"].handle(eos_packet)
1938+
else:
1939+
await self._synthesize(create_ws_data_packet(text, meta_info=meta_info))
19301940
break
19311941
else:
19321942
logger.info(f"Stream id is still None, so not passing it")
@@ -1990,12 +2000,9 @@ async def run(self):
19902000

19912001
logger.info(f"Starting the first message task {self.enforce_streaming}")
19922002
self.output_task = asyncio.create_task(self.__process_output_loop())
2003+
self.first_message_task = asyncio.create_task(self.__first_message())
19932004
if not self.turn_based_conversation or self.enforce_streaming:
19942005
logger.info(f"Setting up other servers")
1995-
self.first_message_task = asyncio.create_task(self.__first_message())
1996-
#if not self.use_llm_to_determine_hangup :
1997-
# By default we will hang up after x amount of silence
1998-
# We still need to
19992006
self.hangup_task = asyncio.create_task(self.__check_for_completion())
20002007

20012008
if self.should_backchannel:

bolna/input_handlers/default.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
import asyncio
22
import base64
33
import time
4+
import uuid
45
from dotenv import load_dotenv
56
from bolna.helpers.logger_config import configure_logger
67
from bolna.helpers.utils import create_ws_data_packet
@@ -28,6 +29,9 @@ async def stop_handler(self):
2829
except Exception as e:
2930
logger.error(f"Error closing WebSocket: {e}")
3031

32+
def get_stream_sid(self):
    """Return a fresh, random stream id for this turn-based session.

    Unlike telephony providers (which supply their own stream SID), the
    default/dashboard handler has no upstream id, so one is minted locally.
    """
    stream_sid = uuid.uuid4()
    return str(stream_sid)
34+
3135
def __process_audio(self, audio):
3236
data = base64.b64decode(audio)
3337
ws_data_packet = create_ws_data_packet(

local_setup/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,4 +20,4 @@ Once the docker containers are up, you can now start to create your agents and i
2020

2121

2222
## Example agents to create, use and start making calls
23-
Go to the [Bolna wiki](https://github.com/bolna-ai/bolna/wiki) to try out sample agents.
23+
Go to the [Bolna examples](https://examples.bolna.dev/) to try out sample agents.

local_setup/dockerfiles/bolna_server.Dockerfile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-reco
1111
RUN --mount=type=cache,target=/root/.cache/pip \
1212
pip install git+https://github.com/bolna-ai/bolna@master
1313
COPY quickstart_server.py /app/
14+
COPY presets /app/presets
1415

1516
EXPOSE 5001
1617

File renamed without changes.
File renamed without changes.
File renamed without changes.

local_setup/quickstart_server.py

Lines changed: 110 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
import asyncio
33
import uuid
44
import traceback
5-
from fastapi import FastAPI, WebSocket, WebSocketDisconnect, HTTPException, Query
5+
from fastapi import FastAPI, WebSocket, WebSocketDisconnect, HTTPException, Query,Body
66
from fastapi.middleware.cors import CORSMiddleware
77
import redis.asyncio as redis
88
from dotenv import load_dotenv
@@ -36,6 +36,22 @@ class CreateAgentPayload(BaseModel):
3636
agent_prompts: Optional[Dict[str, Dict[str, str]]]
3737

3838

39+
@app.get("/agent/{agent_id}")
async def get_agent(agent_id: str):
    """Fetch an agent's stored configuration by its id.

    Returns the JSON-decoded agent document from Redis.
    Raises 404 when the agent does not exist, 500 on unexpected failures.
    """
    try:
        agent_data = await redis_client.get(agent_id)
        if not agent_data:
            raise HTTPException(status_code=404, detail="Agent not found")

        return json.loads(agent_data)

    except HTTPException:
        # Re-raise deliberate HTTP errors (the 404 above) unchanged; the
        # original code let the blanket handler below remap them to 500.
        raise
    except Exception as e:
        logger.error(f"Error fetching agent {agent_id}: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Internal server error")
52+
53+
54+
3955
@app.post("/agent")
4056
async def create_agent(agent_data: CreateAgentPayload):
4157
agent_uuid = str(uuid.uuid4())
@@ -66,6 +82,99 @@ async def create_agent(agent_data: CreateAgentPayload):
6682
return {"agent_id": agent_uuid, "state": "created"}
6783

6884

85+
@app.put("/agent/{agent_id}")
async def edit_agent(agent_id: str, agent_data: CreateAgentPayload = Body(...)):
    """Edit an existing agent identified by *agent_id*.

    Replaces the stored agent config with the payload's config (marked
    "updated"), regenerates extraction prompts for any extraction tasks,
    and persists both the config (Redis) and the prompts (local file store).
    Raises 404 when the agent does not exist, 500 on unexpected failures.
    """
    try:
        # Existence check only — the stored document itself is fully replaced.
        existing_data = await redis_client.get(agent_id)
        if not existing_data:
            raise HTTPException(status_code=404, detail="Agent not found")

        new_data = agent_data.agent_config.model_dump()
        new_data["assistant_status"] = "updated"
        agent_prompts = agent_data.agent_prompts

        logger.info(f"Updating Agent {agent_id}: {new_data}")

        for index, task in enumerate(new_data.get("tasks", [])):
            if task.get("task_type") == "extraction":
                extraction_prompt_llm = os.getenv("EXTRACTION_PROMPT_GENERATION_MODEL")
                if not extraction_prompt_llm:
                    raise HTTPException(status_code=500, detail="Extraction model not configured")

                extraction_prompt_generation_llm = LiteLLM(model=extraction_prompt_llm, max_tokens=2000)
                extraction_details = task["tools_config"]["llm_agent"].get("extraction_details", "")

                extraction_prompt = await extraction_prompt_generation_llm.generate(
                    messages=[
                        {"role": "system", "content": EXTRACTION_PROMPT_GENERATION_PROMPT},
                        {"role": "user", "content": extraction_details}
                    ]
                )

                new_data["tasks"][index]["tools_config"]["llm_agent"]["extraction_json"] = extraction_prompt

        stored_prompt_file_path = f"{agent_id}/conversation_details.json"
        await asyncio.gather(
            redis_client.set(agent_id, json.dumps(new_data)),
            store_file(file_key=stored_prompt_file_path, file_data=agent_prompts, local=True)
        )

        return {"agent_id": agent_id, "state": "updated"}

    except HTTPException:
        # Propagate deliberate HTTP errors (404 / extraction-model 500)
        # instead of letting the generic handler remap them all to 500.
        raise
    except Exception as e:
        logger.error(f"Error updating agent {agent_id}: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Internal server error")
134+
135+
@app.delete("/agent/{agent_id}")
async def delete_agent(agent_id: str):
    """Delete an agent by id.

    Raises 404 when the agent does not exist, 500 on unexpected failures.
    """
    try:
        agent_exists = await redis_client.exists(agent_id)
        if not agent_exists:
            raise HTTPException(status_code=404, detail="Agent not found")

        await redis_client.delete(agent_id)
        return {"agent_id": agent_id, "state": "deleted"}

    except HTTPException:
        # Re-raise the intentional 404 untouched; the original code let the
        # blanket handler below convert it into a 500.
        raise
    except Exception as e:
        logger.error(f"Error deleting agent {agent_id}: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Internal server error")
149+
150+
151+
@app.get("/all")
async def get_all_agents():
    """Fetch all agents stored in Redis.

    NOTE(review): KEYS("*") scans every key in the database, so any
    non-agent keys would be returned too — confirm agents are the only
    residents, or switch to a key prefix / SCAN.
    Raises 500 on unexpected failures.
    """
    try:
        agent_keys = await redis_client.keys("*")
        if not agent_keys:
            return {"agents": []}

        agents = []
        # Build each result in the same iteration as its GET. The original
        # zipped a separate keys list against a values list that skipped
        # failed GETs, misaligning agent_ids with other agents' data.
        for key in agent_keys:
            try:
                data = await redis_client.get(key)
            except Exception as e:
                logger.error(f"An error occurred with key {key}: {e}")
                continue
            if data:
                agents.append({"agent_id": key, "data": json.loads(data)})

        return {"agents": agents}

    except Exception as e:
        logger.error(f"Error fetching all agents: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Internal server error")
176+
177+
69178
#############################################################################################
70179
# Websocket
71180
#############################################################################################

0 commit comments

Comments
 (0)