diff --git a/langsmith_tracing/basic/starter.py b/langsmith_tracing/basic/starter.py
index 901ae9b5..4a2a103a 100644
--- a/langsmith_tracing/basic/starter.py
+++ b/langsmith_tracing/basic/starter.py
@@ -13,7 +13,16 @@
 PROJECT_NAME = "langsmith-basic"
 
 
-async def main():
+@traceable(
+    name="Basic LLM Request",
+    run_type="chain",
+    # CRITICAL: Client-side @traceable runs outside the LangSmithPlugin's scope.
+    # Make sure client-side traces use the same project_name as the one given
+    # to the plugin.
+    project_name=PROJECT_NAME,
+    tags=["client-side"],
+)
+async def main() -> str:
     add_temporal_runs = "--add-temporal-runs" in sys.argv
 
     config = ClientConfig.load_client_connect_config()
@@ -29,25 +38,14 @@ async def main():
         plugins=[plugin],
     )
 
-    @traceable(
-        name="Basic LLM Request",
-        run_type="chain",
-        # CRITICAL: Client-side @traceable runs outside the LangSmithPlugin's scope.
-        # Make sure client-side traces use the same project_name as what is given to
-        # # the plugin.
-        project_name=PROJECT_NAME,
-        tags=["client-side"],
+    result = await client.execute_workflow(
+        BasicLLMWorkflow.run,
+        "What is Temporal?",
+        id="langsmith-basic-workflow-id",
+        task_queue="langsmith-basic-task-queue",
     )
-    async def run_workflow(prompt: str) -> str:
-        return await client.execute_workflow(
-            BasicLLMWorkflow.run,
-            prompt,
-            id="langsmith-basic-workflow-id",
-            task_queue="langsmith-basic-task-queue",
-        )
-
-    result = await run_workflow("What is Temporal?")
     print(f"Workflow result: {result}")
+    return result
 
 
 if __name__ == "__main__":
diff --git a/langsmith_tracing/basic/worker.py b/langsmith_tracing/basic/worker.py
index 140c87da..941b28c5 100644
--- a/langsmith_tracing/basic/worker.py
+++ b/langsmith_tracing/basic/worker.py
@@ -28,16 +28,14 @@ async def main():
         add_temporal_runs=add_temporal_runs,
     )
 
-    client = await Client.connect(
-        **config,
-        plugins=[plugin],
-    )
+    client = await Client.connect(**config)
 
     worker = Worker(
         client,
         task_queue="langsmith-basic-task-queue",
         workflows=[BasicLLMWorkflow],
         activities=[call_openai],
+        plugins=[plugin],
     )
 
     label = "with" if add_temporal_runs else "without"
diff --git a/langsmith_tracing/chatbot/activities.py b/langsmith_tracing/chatbot/activities.py
index 1994f792..5e979320 100644
--- a/langsmith_tracing/chatbot/activities.py
+++ b/langsmith_tracing/chatbot/activities.py
@@ -6,10 +6,22 @@
 from langsmith import traceable
 from langsmith.wrappers import wrap_openai
 from openai import AsyncOpenAI
-from openai.types.responses import Response
+from pydantic import BaseModel, Field
 from temporalio import activity
 
 
+class ToolCall(BaseModel):
+    call_id: str
+    name: str
+    arguments: str
+
+
+class ChatResponse(BaseModel):
+    id: str
+    output_text: str = ""
+    tool_calls: list[ToolCall] = Field(default_factory=list)
+
+
 @dataclass
 class OpenAIRequest:
     model: str
@@ -25,7 +37,7 @@
 
 @traceable(name="Call OpenAI", run_type="llm")
 @activity.defn
-async def call_openai(request: OpenAIRequest) -> Response:
+async def call_openai(request: OpenAIRequest) -> ChatResponse:
     """Call OpenAI Responses API. Retries handled by Temporal, not the OpenAI client."""
     # wrap_openai patches the client so each API call (e.g. responses.create)
     # creates its own child span with model parameters and token usage.
@@ -42,4 +54,13 @@ async def call_openai(request: OpenAIRequest) -> Response:
         response_args["tools"] = request.tools
     if request.previous_response_id:
         response_args["previous_response_id"] = request.previous_response_id
-    return await client.responses.create(**response_args)
+    response = await client.responses.create(**response_args)
+    return ChatResponse(
+        id=response.id,
+        output_text=response.output_text or "",
+        tool_calls=[
+            ToolCall(call_id=item.call_id, name=item.name, arguments=item.arguments)
+            for item in response.output
+            if item.type == "function_call"
+        ],
+    )
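Note on the activities.py change above: Temporal serializes whatever an activity returns, so replacing the raw OpenAI Response with a small Pydantic model keeps payloads compact and independent of OpenAI SDK internals. A minimal sketch of the JSON round trip the model must survive, runnable on its own; the script below is illustrative, not part of this patch, and assumes the langsmith_tracing package is importable:

import json

from langsmith_tracing.chatbot.activities import ChatResponse, ToolCall

# Simulate the serialize/deserialize cycle that Temporal's data converter
# performs when shipping the activity result back to the workflow.
original = ChatResponse(
    id="resp_demo",
    output_text="",
    tool_calls=[
        ToolCall(
            call_id="call_demo",
            name="save_note",
            arguments=json.dumps({"name": "todo", "content": "ship it"}),
        )
    ],
)
payload = original.model_dump_json()  # what crosses the activity boundary
restored = ChatResponse.model_validate_json(payload)
assert restored == original  # lossless round trip by construction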
diff --git a/langsmith_tracing/chatbot/worker.py b/langsmith_tracing/chatbot/worker.py
index 0f43e7c9..b2bd2c98 100644
--- a/langsmith_tracing/chatbot/worker.py
+++ b/langsmith_tracing/chatbot/worker.py
@@ -22,15 +22,14 @@ async def main():
     config = ClientConfig.load_client_connect_config()
     config.setdefault("target_host", "localhost:7233")
 
+    plugin = LangSmithPlugin(
+        project_name="langsmith-chatbot",
+        add_temporal_runs=add_temporal_runs,
+    )
+
     client = await Client.connect(
         **config,
         data_converter=pydantic_data_converter,
-        plugins=[
-            LangSmithPlugin(
-                project_name="langsmith-chatbot",
-                add_temporal_runs=add_temporal_runs,
-            )
-        ],
     )
 
     worker = Worker(
@@ -38,6 +37,7 @@ async def main():
         task_queue="langsmith-chatbot-task-queue",
         workflows=[ChatbotWorkflow],
         activities=[call_openai],
+        plugins=[plugin],
     )
 
     label = "with" if add_temporal_runs else "without"
diff --git a/langsmith_tracing/chatbot/workflows.py b/langsmith_tracing/chatbot/workflows.py
index 4b25df50..47205611 100644
--- a/langsmith_tracing/chatbot/workflows.py
+++ b/langsmith_tracing/chatbot/workflows.py
@@ -147,25 +147,23 @@ async def _traced():
             self._previous_response_id = response.id
 
             tool_results = []
-            for item in response.output:
-                if item.type != "function_call":
-                    continue
-                args = json.loads(item.arguments)
-                if item.name == "save_note":
+            for tc in response.tool_calls:
+                args = json.loads(tc.arguments)
+                if tc.name == "save_note":
                     result = self._save_note(args["name"], args["content"])
                     tool_results.append(
                         {
                             "type": "function_call_output",
-                            "call_id": item.call_id,
+                            "call_id": tc.call_id,
                             "output": result,
                         }
                     )
-                elif item.name == "read_note":
+                elif tc.name == "read_note":
                     result = self._read_note(args["name"])
                     tool_results.append(
                         {
                             "type": "function_call_output",
-                            "call_id": item.call_id,
+                            "call_id": tc.call_id,
                             "output": result,
                         }
                     )
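The if/elif chain in the workflow above grows with every new tool. An alternative is a name-to-handler table, sketched below; dispatch_tool, save_note, and read_note here are hypothetical stand-ins for the workflow's _save_note/_read_note methods, not code from this patch:

import json
from typing import Callable

from langsmith_tracing.chatbot.activities import ToolCall

def dispatch_tool(tc: ToolCall, handlers: dict[str, Callable[[dict], str]]) -> dict:
    # Decode the JSON-encoded arguments once, then route on the tool name.
    args = json.loads(tc.arguments)
    return {
        "type": "function_call_output",
        "call_id": tc.call_id,
        "output": handlers[tc.name](args),
    }

# In-memory stand-ins for the workflow's note-taking methods.
notes: dict[str, str] = {}

def save_note(args: dict) -> str:
    notes[args["name"]] = args["content"]
    return f"Saved note '{args['name']}'."

def read_note(args: dict) -> str:
    return notes.get(args["name"], "Note not found.")

handlers = {"save_note": save_note, "read_note": read_note}
tc = ToolCall(
    call_id="call_1",
    name="save_note",
    arguments=json.dumps({"name": "todo", "content": "ship it"}),
)
assert dispatch_tool(tc, handlers)["output"] == "Saved note 'todo'."

One design difference: an unknown tool name raises KeyError here and fails the workflow task loudly, whereas the if/elif version silently skips it; which behavior is right depends on the workflow.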
diff --git a/tests/langsmith_tracing/helpers.py b/tests/langsmith_tracing/helpers.py
index 6742a60f..afd3d1c4 100644
--- a/tests/langsmith_tracing/helpers.py
+++ b/tests/langsmith_tracing/helpers.py
@@ -1,33 +1,8 @@
 """Shared test helpers for LangSmith tracing tests."""
 
-from openai.types.responses import Response
-from openai.types.responses.response_output_message import ResponseOutputMessage
-from openai.types.responses.response_output_text import ResponseOutputText
+from langsmith_tracing.chatbot.activities import ChatResponse
 
 
-def make_text_response(text: str) -> Response:
-    """Build a minimal OpenAI Response with a text output."""
-    return Response.model_construct(
-        id="resp_mock",
-        created_at=0.0,
-        model="gpt-4o-mini",
-        object="response",
-        output=[
-            ResponseOutputMessage.model_construct(
-                id="msg_mock",
-                type="message",
-                role="assistant",
-                status="completed",
-                content=[
-                    ResponseOutputText.model_construct(
-                        type="output_text",
-                        text=text,
-                        annotations=[],
-                    )
-                ],
-            )
-        ],
-        parallel_tool_calls=False,
-        tool_choice="auto",
-        tools=[],
-    )
+def make_text_response(text: str) -> ChatResponse:
+    """Build a minimal ChatResponse with a text output."""
+    return ChatResponse(id="resp_mock", output_text=text)
diff --git a/tests/langsmith_tracing/test_chatbot.py b/tests/langsmith_tracing/test_chatbot.py
index f7845217..2febb654 100644
--- a/tests/langsmith_tracing/test_chatbot.py
+++ b/tests/langsmith_tracing/test_chatbot.py
@@ -1,42 +1,29 @@
 import json
 import uuid
 
-from openai.types.responses import Response
-from openai.types.responses.response_function_tool_call import (
-    ResponseFunctionToolCall,
-)
 from temporalio import activity
 from temporalio.client import Client
 from temporalio.contrib.langsmith import LangSmithPlugin
 from temporalio.testing import WorkflowEnvironment
 from temporalio.worker import Worker
 
-from langsmith_tracing.chatbot.activities import OpenAIRequest
+from langsmith_tracing.chatbot.activities import ChatResponse, OpenAIRequest, ToolCall
 from langsmith_tracing.chatbot.workflows import ChatbotWorkflow
 from tests.langsmith_tracing.helpers import make_text_response
 
 
 def _make_function_call_response(
     name: str, arguments: dict, call_id: str = "call_123"
-) -> Response:
-    return Response.model_construct(
+) -> ChatResponse:
+    return ChatResponse(
         id="resp_tool",
-        created_at=0.0,
-        model="gpt-4o-mini",
-        object="response",
-        output=[
-            ResponseFunctionToolCall.model_construct(
-                id="fc_mock",
-                type="function_call",
+        tool_calls=[
+            ToolCall(
+                call_id=call_id,
                 name=name,
                 arguments=json.dumps(arguments),
-                call_id=call_id,
-                status="completed",
             )
         ],
-        parallel_tool_calls=False,
-        tool_choice="auto",
-        tools=[],
     )
 
 
@@ -45,7 +32,7 @@ async def test_chatbot_save_note(client: Client, env: WorkflowEnvironment):
     call_count = 0
 
     @activity.defn(name="call_openai")
-    async def mock_call_openai(request: OpenAIRequest) -> Response:
+    async def mock_call_openai(request: OpenAIRequest) -> ChatResponse:
         nonlocal call_count
         call_count += 1
         if call_count == 1:
@@ -86,7 +73,7 @@ async def test_chatbot_read_note(client: Client, env: WorkflowEnvironment):
     call_count = 0
 
     @activity.defn(name="call_openai")
-    async def mock_call_openai(request: OpenAIRequest) -> Response:
+    async def mock_call_openai(request: OpenAIRequest) -> ChatResponse:
         nonlocal call_count
        call_count += 1
         if call_count == 1:
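With the helpers reduced to plain ChatResponse construction, they can be sanity-checked without a Temporal test environment. An illustrative check, not part of the test suite (it assumes the packages are importable from the repo root):

import json

from langsmith_tracing.chatbot.activities import ChatResponse, ToolCall
from tests.langsmith_tracing.helpers import make_text_response

# Text-only turn: output_text set, no tool calls.
text_resp = make_text_response("Temporal is a durable execution platform.")
assert text_resp.output_text.startswith("Temporal")
assert text_resp.tool_calls == []

# Tool-call turn, mirroring _make_function_call_response in the tests above.
tool_resp = ChatResponse(
    id="resp_tool",
    tool_calls=[
        ToolCall(
            call_id="call_123",
            name="save_note",
            arguments=json.dumps({"name": "todo", "content": "ship it"}),
        )
    ],
)
assert json.loads(tool_resp.tool_calls[0].arguments) == {"name": "todo", "content": "ship it"}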