From 090418ecd2bd5bbd22e1508b0d3c6b2d65a6cc17 Mon Sep 17 00:00:00 2001
From: Chester Curme
Date: Fri, 28 Feb 2025 11:02:12 -0500
Subject: [PATCH 1/4] update anthropic

---
 libs/partners/anthropic/langchain_anthropic/chat_models.py | 7 +++++++
 .../anthropic/tests/integration_tests/test_chat_models.py  | 2 ++
 2 files changed, 9 insertions(+)

diff --git a/libs/partners/anthropic/langchain_anthropic/chat_models.py b/libs/partners/anthropic/langchain_anthropic/chat_models.py
index c5ca716f66252..384bb98a1e6c9 100644
--- a/libs/partners/anthropic/langchain_anthropic/chat_models.py
+++ b/libs/partners/anthropic/langchain_anthropic/chat_models.py
@@ -900,6 +900,8 @@ def _format_output(self, data: Any, **kwargs: Any) -> ChatResult:
         llm_output = {
             k: v for k, v in data_dict.items() if k not in ("content", "role", "type")
         }
+        if "model" in llm_output:
+            llm_output["model_name"] = llm_output["model"]
         if (
             len(content) == 1
             and content[0]["type"] == "text"
@@ -1445,9 +1447,14 @@ def _make_message_chunk_from_anthropic_event(
     # See https://github.com/anthropics/anthropic-sdk-python/blob/main/src/anthropic/lib/streaming/_messages.py  # noqa: E501
     if event.type == "message_start" and stream_usage:
         usage_metadata = _create_usage_metadata(event.message.usage)
+        if hasattr(event.message, "model"):
+            response_metadata = {"model_name": event.message.model}
+        else:
+            response_metadata = {}
         message_chunk = AIMessageChunk(
             content="" if coerce_content_to_string else [],
             usage_metadata=usage_metadata,
+            response_metadata=response_metadata,
         )
     elif (
         event.type == "content_block_start"
diff --git a/libs/partners/anthropic/tests/integration_tests/test_chat_models.py b/libs/partners/anthropic/tests/integration_tests/test_chat_models.py
index 9a07d6ad84690..b5f1e59ced132 100644
--- a/libs/partners/anthropic/tests/integration_tests/test_chat_models.py
+++ b/libs/partners/anthropic/tests/integration_tests/test_chat_models.py
@@ -62,6 +62,7 @@ def test_stream() -> None:
     )
     assert "stop_reason" in full.response_metadata
     assert "stop_sequence" in full.response_metadata
+    assert "model_name" in full.response_metadata
 
 
 async def test_astream() -> None:
@@ -219,6 +220,7 @@ async def test_ainvoke() -> None:
 
     result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
     assert isinstance(result.content, str)
+    assert "model_name" in result.response_metadata
 
 
 def test_invoke() -> None:

From 2fb585cdfe6ddebc7da2fd17761e90d838b37905 Mon Sep 17 00:00:00 2001
From: Chester Curme
Date: Fri, 28 Feb 2025 11:07:59 -0500
Subject: [PATCH 2/4] update mistral

---
 libs/partners/mistralai/langchain_mistralai/chat_models.py | 6 +++++-
 .../mistralai/tests/integration_tests/test_chat_models.py  | 1 +
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/libs/partners/mistralai/langchain_mistralai/chat_models.py b/libs/partners/mistralai/langchain_mistralai/chat_models.py
index 4ab31613ed8ac..aaa8c43780f04 100644
--- a/libs/partners/mistralai/langchain_mistralai/chat_models.py
+++ b/libs/partners/mistralai/langchain_mistralai/chat_models.py
@@ -579,7 +579,11 @@ def _create_chat_result(self, response: Dict) -> ChatResult:
             )
             generations.append(gen)
 
-        llm_output = {"token_usage": token_usage, "model": self.model}
+        llm_output = {
+            "token_usage": token_usage,
+            "model_name": self.model,
+            "model": self.model,  # Backwards compatibility
+        }
         return ChatResult(generations=generations, llm_output=llm_output)
 
     def _create_message_dicts(
diff --git a/libs/partners/mistralai/tests/integration_tests/test_chat_models.py b/libs/partners/mistralai/tests/integration_tests/test_chat_models.py
index bc16dba4e55ee..d3592ef32fdce 100644
--- a/libs/partners/mistralai/tests/integration_tests/test_chat_models.py
+++ b/libs/partners/mistralai/tests/integration_tests/test_chat_models.py
@@ -87,6 +87,7 @@ async def test_ainvoke() -> None:
 
     result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
     assert isinstance(result.content, str)
+    assert "model_name" in result.response_metadata
 
 
 def test_invoke() -> None:

From 298daec0ae0b8f6dc93dfd686b65aba5cffac43c Mon Sep 17 00:00:00 2001
From: Chester Curme
Date: Fri, 28 Feb 2025 11:09:56 -0500
Subject: [PATCH 3/4] add test

---
 .../anthropic/tests/integration_tests/test_chat_models.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/libs/partners/anthropic/tests/integration_tests/test_chat_models.py b/libs/partners/anthropic/tests/integration_tests/test_chat_models.py
index b5f1e59ced132..906de58ea08ae 100644
--- a/libs/partners/anthropic/tests/integration_tests/test_chat_models.py
+++ b/libs/partners/anthropic/tests/integration_tests/test_chat_models.py
@@ -35,6 +35,7 @@ def test_stream() -> None:
     full: Optional[BaseMessageChunk] = None
     chunks_with_input_token_counts = 0
     chunks_with_output_token_counts = 0
+    chunks_with_model_name = 0
     for token in llm.stream("I'm Pickle Rick"):
         assert isinstance(token.content, str)
         full = token if full is None else full + token
@@ -44,12 +45,14 @@ def test_stream() -> None:
                 chunks_with_input_token_counts += 1
             elif token.usage_metadata.get("output_tokens"):
                 chunks_with_output_token_counts += 1
+        chunks_with_model_name += int("model_name" in token.response_metadata)
     if chunks_with_input_token_counts != 1 or chunks_with_output_token_counts != 1:
         raise AssertionError(
             "Expected exactly one chunk with input or output token counts. "
             "AIMessageChunk aggregation adds counts. Check that "
             "this is behaving properly."
         )
+    assert chunks_with_model_name == 1
     # check token usage is populated
     assert isinstance(full, AIMessageChunk)
     assert full.usage_metadata is not None

From 19356de864e0b93b4b27fc0adf0e1b0916c43409 Mon Sep 17 00:00:00 2001
From: Chester Curme
Date: Fri, 28 Feb 2025 13:53:42 -0500
Subject: [PATCH 4/4] nit

---
 libs/partners/anthropic/langchain_anthropic/chat_models.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libs/partners/anthropic/langchain_anthropic/chat_models.py b/libs/partners/anthropic/langchain_anthropic/chat_models.py
index 384bb98a1e6c9..25af7afde2e22 100644
--- a/libs/partners/anthropic/langchain_anthropic/chat_models.py
+++ b/libs/partners/anthropic/langchain_anthropic/chat_models.py
@@ -900,7 +900,7 @@ def _format_output(self, data: Any, **kwargs: Any) -> ChatResult:
         llm_output = {
             k: v for k, v in data_dict.items() if k not in ("content", "role", "type")
         }
-        if "model" in llm_output:
+        if "model" in llm_output and "model_name" not in llm_output:
             llm_output["model_name"] = llm_output["model"]
         if (
             len(content) == 1
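
Taken together, the four patches surface the provider-reported model id under the standardized response_metadata["model_name"] key for both ChatAnthropic and ChatMistralAI, while Mistral's llm_output keeps the legacy "model" key for backwards compatibility. Below is a minimal usage sketch of the resulting behavior. It is an illustration, not part of the patch: it assumes the langchain-anthropic package is installed, ANTHROPIC_API_KEY is set in the environment, and the model id is an arbitrary example.

    # Illustrative sketch only (not part of the patch): assumes
    # langchain-anthropic is installed and ANTHROPIC_API_KEY is set;
    # the model id below is an arbitrary example.
    from langchain_anthropic import ChatAnthropic

    llm = ChatAnthropic(model="claude-3-5-haiku-latest")

    # Non-streaming path: _format_output now mirrors "model" into "model_name".
    result = llm.invoke("I'm Pickle Rick")
    print(result.response_metadata["model_name"])

    # Streaming path: exactly one chunk (from the message_start event) carries
    # "model_name", and AIMessageChunk aggregation keeps it on the final message.
    full = None
    for chunk in llm.stream("I'm Pickle Rick"):
        full = chunk if full is None else full + chunk
    assert full is not None
    assert "model_name" in full.response_metadata

After patch 2, ChatMistralAI behaves the same way for invoke/ainvoke, since _create_chat_result writes self.model to both "model_name" and the legacy "model" key.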