Chat Session Example#
This example demonstrates a complete interactive chat session using the staff scheduling model.
Example Code#
1# ruff: noqa: T201
2import argparse
3import asyncio
4import json
5import os
6import readline # noqa: F401
7from importlib.resources import files
8from pathlib import Path
9
10from rich.console import Console
11from rich.markdown import Markdown
12from rich.panel import Panel
13from rich.status import Status
14from rich.syntax import Syntax
15from rich.text import Text
16
17from decision_ai.cli_tools.exceptions import TomlValidationError
18from decision_ai.cli_tools.toml import TomlData
19from decision_ai.client.chat_session import ChatSession
20from decision_ai.client.client import DecisionAI
21from decision_ai.client.exceptions import OutdatedVersionError, UnsupportedVersionError
22from decision_ai.examples import StaffSchedulingInput, staff_scheduling_module
23
24console = Console()
25
26
27def parse_slash_command(user_input: str) -> tuple[str | None, str | None]:
28 """Parse user input for slash commands.
29
30 Args:
31 user_input: The raw user input string
32
33 Returns:
34 Tuple of (command, argument) where command is the slash command and argument is any text after
35 Returns (None, None) if not a slash command
36 """
37 if user_input.startswith("/"):
38 parts = user_input[1:].split(None, 1) # Split on first whitespace
39 command = parts[0].lower()
40 argument = parts[1] if len(parts) > 1 else None
41 return command, argument
42 return None, None
43
44
def get_model_id_from_toml_config() -> str | None:
    """Return the deployed model ID stored in the example's pyproject.toml, or None if absent."""
    toml_path = f"{files('decision_ai.examples.staff_scheduling')}/pyproject.toml"
    config = TomlData(toml_path)
    try:
        section = config.get_section("tool.decision_ai.model_id")
    except TomlValidationError:
        return None
    return section["id"]
51
52
def _print_user_panel(text: str, width: int) -> None:
    """Render the user's input right-aligned in a blue panel."""
    panel = Panel(
        Text(text, justify="right"),
        border_style="blue",
        title="You",
        title_align="right",
        width=width,
    )
    console.print(panel, justify="right")


def _print_assistant_panel(content, *, border_style: str, title: str, width: int) -> None:
    """Render assistant-side content left-aligned in a colored panel."""
    panel = Panel(
        content,
        border_style=border_style,
        title=title,
        title_align="left",
        width=width,
    )
    console.print(panel, justify="left")


async def interactive_chat(chat_session: ChatSession) -> None:
    """Interactive chat session with markdown rendering.

    Supports slash commands (/exit, /status, /reasoning_on, /reasoning_off);
    any other input is sent to the assistant and streamed back as Rich panels.

    Args:
        chat_session: The chat session to connect to and drive interactively.
    """
    # Persistent reasoning state - None means use default, True/False are explicit settings
    reasoning_enabled: bool | None = None

    async with chat_session.connect() as chat:
        while True:
            # NOTE: input() blocks the event loop; acceptable here since this is
            # the only task running in this example.
            try:
                user_input = input("\nYour prompt: ")
            except EOFError:
                # Ctrl-D / closed stdin: exit gracefully instead of crashing.
                console.print("[bold red]Input closed - exiting chat.[/bold red]")
                break

            # Check for slash commands
            command, argument = parse_slash_command(user_input)

            if command == "exit":
                console.print(
                    f"[bold red]Exiting chat with session ID: {chat_session.chat_session_id} "
                    f"and model ID: {chat_session.opt_model_id}[/bold red]"
                )
                break

            # Handle slash commands
            if command == "reasoning_on":
                reasoning_enabled = True
                console.print("[bold green]✓ Reasoning enabled for future messages[/bold green]")
                continue
            if command == "reasoning_off":
                reasoning_enabled = False
                console.print("[bold red]✗ Reasoning disabled for future messages[/bold red]")
                continue
            if command == "status":
                # Calculate panel width as 2/3 of terminal width
                panel_width = int(console.width * 2 / 3)
                _print_user_panel("/status", panel_width)

                # Get current state and pretty print it with syntax highlighting
                with chat_session.remote_state() as state:
                    # Add current reasoning state to the display
                    state_dict = state.model_dump()
                    state_dict["current_reasoning_enabled"] = reasoning_enabled
                    json_str = json.dumps(state_dict, indent=2)
                    syntax = Syntax(json_str, "json", theme="monokai", word_wrap=True)
                    _print_assistant_panel(
                        syntax,
                        border_style="green",
                        title="Assistant - Chat Session State",
                        width=panel_width,
                    )
                continue
            if command is not None:
                console.print(f"[bold red]Unknown command: /{command}[/bold red]")
                continue

            # Regular message - not a command
            # Calculate panel width as 3/4 of terminal width
            panel_width = int(console.width * 3 / 4)

            # Show reasoning status if enabled
            display_prompt = user_input
            if reasoning_enabled is not None:
                reasoning_status = "🧠" if reasoning_enabled else ""
                display_prompt = f"{reasoning_status} {user_input}"

            # Regular chat message handling
            _print_user_panel(display_prompt, panel_width)

            console.print("[bold green]Sending...[/bold green]")
            async for message in chat.stream_messages(
                prompt=user_input, llm_iteration_limit=6, reasoning_enabled=reasoning_enabled
            ):
                # Hoist the kind lookup once instead of repeating hasattr chains.
                kind = getattr(message.message, "kind", None)
                if kind == "reasoning":
                    _print_assistant_panel(
                        Markdown(str(message.message)),
                        border_style="yellow",
                        title="🧠 Assistant Reasoning",
                        width=panel_width,
                    )
                elif kind in ("chat_response", "tool_call"):
                    _print_assistant_panel(
                        Markdown(str(message.message)),
                        border_style="green",
                        title="Assistant",
                        width=panel_width,
                    )
                elif kind == "tool_output":
                    title = f"🔧 {kind.replace('_', ' ').title()}"
                    _print_assistant_panel(
                        Markdown(str(message.message)),
                        border_style="cyan",
                        title=title,
                        width=panel_width,
                    )
166
167
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Run an interactive chat session for staff scheduling.")
    parser.add_argument("--chat-id", type=str, help="The ID of the chat session to continue.", default=None)
    parser.add_argument("--model-id", type=str, help="The ID of the model to use.", default=None)
    parser.add_argument("--toml", action="store_true", help="Use the toml file to deploy the model.")
    parser.add_argument(
        "--examples-dir", type=str, help="Directory containing in-context learning examples (XML files).", default=None
    )
    args = parser.parse_args()

    # Validate that --toml is not used with other parameters
    if args.toml and args.model_id:
        error_msg = "Cannot use --toml with --model-id"
        console.print(f"[bold red]{error_msg}[/bold red]")
        raise ValueError(error_msg)

    # Validate that --chat-id and --model-id are not used together
    if args.model_id and args.chat_id:
        error_msg = "Cannot provide both --chat-id and --model-id"
        console.print(f"[bold red]{error_msg}[/bold red]")
        raise ValueError(error_msg)

    input_data = StaffSchedulingInput.load_example()
    # Create a decision AI client. Make sure to set the QUANTAGONIA_API_KEY environment variable
    # before constructing the client.
    # Explicit check instead of `assert`: assertions are stripped under `python -O`.
    if os.getenv("QUANTAGONIA_API_KEY") is None:
        error_msg = "QUANTAGONIA_API_KEY environment variable must be set"
        console.print(f"[bold red]{error_msg}[/bold red]")
        raise RuntimeError(error_msg)
    decision_ai = DecisionAI()

    # A version mismatch is reported as a warning only; the session may still work.
    try:
        decision_ai.assert_version_compatibility()
    except (OutdatedVersionError, UnsupportedVersionError) as e:
        console.print(f"[bold yellow] Warning:[/bold yellow] {e}")

    # Initialize model_id to None; it is resolved below from --toml, --model-id,
    # or a fresh deployment.
    model_id = None

    # Handle --toml parameter
    if args.toml:
        model_id = get_model_id_from_toml_config()
        if model_id is None:
            error_msg = (
                "Model ID not found in the TOML configuration file. "
                "Please run `decision_ai deploy` to deploy the model."
            )
            raise ValueError(error_msg)
        console.print(f"[bold]Using model ID from pyproject.toml:[/bold] {model_id}")
    elif args.model_id:
        model_id = args.model_id
    elif not args.chat_id:
        # (args.model_id is already falsy in this branch, so no extra check is needed.)
        # This creates a model and uploads the code to the DecisionAI platform.
        # Note that for production systems, you should use the CLI to deploy and maintain the model.
        with Status("[bold yellow]Deploying model...", spinner="dots"):
            examples_path = None
            if args.examples_dir is not None:
                examples_path = Path(args.examples_dir)
                if not examples_path.exists() or not examples_path.is_dir():
                    console.print(f"[bold yellow]Warning:[/bold yellow] Examples directory not found: {examples_path}")
                    examples_path = None
            model = decision_ai.deploy_model(
                staff_scheduling_module,
                "ExampleStaffScheduling",
                examples_dir=examples_path,
            )
            console.print(f"\n[bold]Model deployed with ID:[/bold] {model.id}")
            model_id = model.id

    # Print help text
    help_panel = Panel(
        Text.from_markup(
            "[bold]Available Slash Commands[/bold]\n\n"
            "[cyan]/status[/cyan] - Show current chat session state\n"
            "[cyan]/exit[/cyan] - Quit the chat session\n"
            "[cyan]/reasoning_on[/cyan] - Enable reasoning for future messages\n"
            "[cyan]/reasoning_off[/cyan] - Disable reasoning for future messages\n"
        ),
        title="Help",
        border_style="yellow",
    )
    console.print(help_panel)

    # Create a chat session with the model.
    if not args.chat_id:
        # model_id was already resolved above (--toml, --model-id, or deployment),
        # so only the "still missing" case needs handling here.
        if model_id is None:
            error_msg = "Model ID is required when not continuing an existing chat session"
            raise ValueError(error_msg)
        chat_session = decision_ai.create_chat_session(input_data, model_id)
    else:
        chat_session = decision_ai.continue_chat_session(args.chat_id)
    console.print(f"\n[bold]Connecting to Chat Session ID:[/bold] {chat_session.chat_session_id}")
    asyncio.run(interactive_chat(chat_session))
Code Walkthrough#
The example demonstrates several key concepts:
Loading Input Data
The example uses a predefined staff scheduling input data model:
input_data = StaffSchedulingInput.load_example()
Creating and Deploying Model
For demonstration purposes, it creates and deploys the model directly:
model = decision_ai.deploy_model(staff_scheduling_module, "ExampleStaffScheduling")
The module
staff_scheduling_module is a predefined module that is used to demonstrate the chat session. You can see the code of the model in the Staff Scheduling Example. Note: In production, you should use the CLI tools for model deployment.
Interactive Chat Session
The interactive_chat function shows how to:
- Connect to the chat session
- Handle user input
- Process responses
- Format output using Rich for better readability
Running the Example#
To run this example:
Set your API key:
export QUANTAGONIA_API_KEY=your_api_key
Run the example:
python -m decision_ai.examples.staff_scheduling.chat_example
Example Interactions#
Here are some example interactions you can try:
“Ernie and Bert don’t want to work together in the same shifts”
“Linda should not work on her birthday”
“Solve the model”