feat(05-03): implement /discuss command handler with round limit
Add /discuss [rounds] command that:
- Requires an active discussion from /open
- Stores discussion state in user_data for /next and /stop
- Runs sequential rounds via run_discussion_round
- Shows a "Round N/M" progress indicator

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
parent
9133d4eebf
commit
104eceb246
2 changed files with 112 additions and 3 deletions
|
|
@@ -7,7 +7,7 @@ project management, discussion commands, and export functionality.
|
||||||
from telegram.ext import Application, CommandHandler
|
from telegram.ext import Application, CommandHandler
|
||||||
|
|
||||||
from moai.bot.handlers.commands import help_command, start_command
|
from moai.bot.handlers.commands import help_command, start_command
|
||||||
from moai.bot.handlers.discussion import ask_command, open_command
|
from moai.bot.handlers.discussion import ask_command, discuss_command, open_command
|
||||||
from moai.bot.handlers.projects import project_command, projects_command
|
from moai.bot.handlers.projects import project_command, projects_command
|
||||||
from moai.bot.handlers.status import status_command
|
from moai.bot.handlers.status import status_command
|
||||||
|
|
||||||
|
|
@@ -32,3 +32,4 @@ def register_handlers(app: Application) -> None:
|
||||||
# Discussion / Q&A
|
# Discussion / Q&A
|
||||||
app.add_handler(CommandHandler("ask", ask_command))
|
app.add_handler(CommandHandler("ask", ask_command))
|
||||||
app.add_handler(CommandHandler("open", open_command))
|
app.add_handler(CommandHandler("open", open_command))
|
||||||
|
app.add_handler(CommandHandler("discuss", discuss_command))
|
||||||
|
|
|
||||||
|
|
@@ -6,8 +6,14 @@ from telegram.ext import ContextTypes
|
||||||
from moai.bot.handlers.projects import get_selected_project
|
from moai.bot.handlers.projects import get_selected_project
|
||||||
from moai.core.ai_client import MODEL_MAP, get_ai_client
|
from moai.core.ai_client import MODEL_MAP, get_ai_client
|
||||||
from moai.core.models import DiscussionType, RoundType
|
from moai.core.models import DiscussionType, RoundType
|
||||||
from moai.core.orchestrator import query_models_parallel
|
from moai.core.orchestrator import query_models_parallel, run_discussion_round
|
||||||
from moai.core.services.discussion import create_discussion, create_message, create_round
|
from moai.core.services.discussion import (
|
||||||
|
create_discussion,
|
||||||
|
create_message,
|
||||||
|
create_round,
|
||||||
|
get_active_discussion,
|
||||||
|
get_discussion,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
async def ask_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
|
async def ask_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
|
||||||
|
|
@@ -137,3 +143,105 @@ async def open_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> No
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
await update.message.reply_text(f"Error: {e}")
|
await update.message.reply_text(f"Error: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
async def discuss_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Handle /discuss [rounds] command - start sequential multi-round discussion.

    Requires a selected project with configured models and an active discussion
    (created via /open). Starts a sequential discussion where each model sees
    prior responses. Runs the first round immediately and stores per-chat state
    in ``context.user_data["discussion_state"]`` so /next and /stop can continue
    or end the discussion.

    Args:
        update: Incoming Telegram update carrying the /discuss message.
        context: PTB callback context; ``context.args`` may hold the round limit.

    Examples:
        /discuss - Start 3-round discussion (default)
        /discuss 5 - Start 5-round discussion
    """
    args = context.args or []

    # Parse optional round limit (default: 3). Keep the try body minimal so
    # only the int() conversion can raise ValueError.
    round_limit = 3
    if args:
        try:
            round_limit = int(args[0])
        except ValueError:
            await update.message.reply_text(
                f"Invalid round limit: {args[0]}\n\nUsage: /discuss [rounds]"
            )
            return
        if round_limit < 1:
            await update.message.reply_text("Round limit must be at least 1.")
            return

    # Require a selected project
    project = await get_selected_project(context)
    if project is None:
        await update.message.reply_text("No project selected. Use /project select <name> first.")
        return

    # Require configured models
    if not project.models:
        await update.message.reply_text(
            "No models configured for this project.\n"
            "Use /project models claude,gpt,gemini to set models."
        )
        return

    # Check for active discussion (created earlier via /open)
    discussion = await get_active_discussion(project.id)
    if discussion is None:
        await update.message.reply_text(
            "No active discussion. Start one with /open <question> first."
        )
        return

    # Calculate next round number (continue from existing rounds)
    current_round_num = len(discussion.rounds) + 1

    # Store discussion state for /next and /stop
    context.user_data["discussion_state"] = {
        "discussion_id": discussion.id,
        "project_id": project.id,
        "project_name": project.name,
        "models": project.models,
        "current_round": current_round_num,
        "round_limit": round_limit,
    }

    # Show typing indicator while models are queried
    await update.message.chat.send_action("typing")

    try:
        # Reload discussion with full eager loading for context building.
        # Guard against the discussion disappearing between the two lookups
        # (previously this crashed with an opaque AttributeError).
        discussion = await get_discussion(discussion.id)
        if discussion is None:
            context.user_data.pop("discussion_state", None)
            await update.message.reply_text(
                "No active discussion. Start one with /open <question> first."
            )
            return

        # Run first round (each model sees prior responses sequentially)
        responses = await run_discussion_round(
            discussion=discussion,
            models=project.models,
            project_name=project.name,
            round_number=current_round_num,
        )

        # Build response text with a Round N/M progress indicator.
        # NOTE(review): raw model output is sent with parse_mode="Markdown";
        # unbalanced markdown in a response will make Telegram reject the
        # message (surfaced below as "Error: ..."). Consider escaping.
        response_lines = [f"*Round {current_round_num}/{round_limit}:*\n"]
        for model, response in responses.items():
            response_lines.append(f"*{model.title()}:*\n{response}\n")

        if current_round_num >= round_limit:
            response_lines.append(f"\n_Discussion complete ({round_limit} rounds)._")
            # Clear state when done; pop() tolerates the key already being gone
            context.user_data.pop("discussion_state", None)
        else:
            response_lines.append(
                f"\n_Round {current_round_num}/{round_limit} complete. Use /next or /stop._"
            )
            # Update current round so /next continues from the right place
            context.user_data["discussion_state"]["current_round"] = current_round_num + 1

        await update.message.reply_text(
            "\n".join(response_lines),
            parse_mode="Markdown",
        )

    except Exception as e:
        # Broad by design: surface any round/query failure to the user
        await update.message.reply_text(f"Error: {e}")
|
||||||
Loading…
Add table
Reference in a new issue