feat(05-03): add build_context and run_discussion_round to orchestrator

Add sequential model execution for discuss mode:
- build_context() converts discussion history to OpenAI message format
- run_discussion_round() queries models sequentially, each seeing prior responses

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
Mikkel Georgsen 2026-01-16 19:43:05 +00:00
parent bed0fbcb3e
commit 9133d4eebf

View file

@@ -8,6 +8,8 @@ import asyncio
import logging
from moai.core.ai_client import get_ai_client
from moai.core.models import Discussion, RoundType
from moai.core.services.discussion import create_message, create_round
logger = logging.getLogger(__name__)
@@ -69,3 +71,107 @@ async def query_models_parallel(
    results = await asyncio.gather(*tasks)
    return dict(results)
def build_context(discussion: Discussion) -> list[dict]:
    """Convert a discussion's history into OpenAI-style chat messages.

    The original question is emitted first, followed by every stored model
    response in round order (and timestamp order within each round). Each
    response is prefixed with the model's name so later models can tell who
    said what; everything is sent under the "user" role.

    Args:
        discussion: Discussion object with eager-loaded rounds and messages.

    Returns:
        List of message dicts in OpenAI format:
        [{"role": "user", "content": "..."}]
    """
    context: list[dict] = [{"role": "user", "content": discussion.question}]

    for rnd in sorted(discussion.rounds, key=lambda r: r.round_number):
        for msg in sorted(rnd.messages, key=lambda m: m.timestamp):
            context.append(
                {
                    "role": "user",
                    # Capitalized model name gives attribution for context.
                    "content": f"**{msg.model.title()}:** {msg.content}",
                }
            )
    return context
async def run_discussion_round(
    discussion: Discussion,
    models: list[str],
    project_name: str,
    round_number: int,
) -> dict[str, str]:
    """Run a single round of sequential discussion.

    Models are queried one at a time, in the order given. Each model's
    prompt contains the full prior history (original question plus all
    earlier responses), *including* responses produced earlier in this same
    round — so e.g. the second model sees the first model's answer before
    replying.

    Args:
        discussion: Discussion object with eager-loaded rounds and messages.
        models: Model short names, in the order they should speak.
        project_name: Project name interpolated into the system prompt.
        round_number: The round number being executed.

    Returns:
        Dict mapping each model name to its response text. A failed model
        call yields an "[Error: ...]" placeholder instead of raising.
    """
    client = get_ai_client()

    # System prompt tells each model who else is participating.
    models_str = ", ".join(models)
    system_prompt = SYSTEM_PROMPT.format(models=models_str, topic=project_name)

    # Create the round record first so messages have a round to attach to.
    round_ = await create_round(
        discussion_id=discussion.id,
        round_number=round_number,
        round_type=RoundType.SEQUENTIAL,
    )

    # Seed the context with everything said in prior rounds.
    context_messages = build_context(discussion)

    # Store responses as we go (for sequential context building).
    responses: dict[str, str] = {}

    # Query each model SEQUENTIALLY.
    for model in models:
        try:
            response = await client.complete(
                model=model,
                messages=context_messages,
                system_prompt=system_prompt,
            )
            logger.info("Model %s responded successfully (round %d)", model, round_number)
        except Exception as e:
            # Best-effort: one failing model must not abort the round.
            # logger.exception (vs logger.error) also records the traceback.
            logger.exception("Model %s failed (round %d): %s", model, round_number, e)
            response = f"[Error: {e}]"

        # Persist the message whether the call succeeded or failed.
        await create_message(
            round_id=round_.id,
            model=model,
            content=response,
        )
        responses[model] = response

        # Make this response visible to the next model in this same round.
        formatted = f"**{model.title()}:** {response}"
        context_messages.append({"role": "user", "content": formatted})

    return responses