
Commit 87d9008

Merge branch 'main' into vi-test
2 parents: 72db52b + dbad3b8

File tree

8 files changed: +74 / -9 lines


libs/community/langchain_google_community/bq_storage_vectorstores/bigquery.py

Lines changed: 5 additions & 3 deletions
@@ -301,7 +301,7 @@ def _create_search_query(
         if table_to_query is not None:
             embeddings_query = f"""
             with embeddings as (
-            SELECT {self.embedding_field}, ROW_NUMBER() OVER() as row_num
+            SELECT {self.embedding_field}, row_num
             from `{table_to_query}`
             )"""

@@ -390,14 +390,16 @@ def _create_temp_bq_table(
         df = pd.DataFrame([])

         df[self.embedding_field] = embeddings
+        df["row_num"] = list(range(len(df)))
         table_id = (
             f"{self.project_id}."
             f"{self.temp_dataset_name}."
             f"{self.table_name}_{uuid.uuid4().hex}"
         )

         schema = [
-            bigquery.SchemaField(self.embedding_field, "FLOAT64", mode="REPEATED")
+            bigquery.SchemaField(self.embedding_field, "FLOAT64", mode="REPEATED"),
+            bigquery.SchemaField("row_num", "INT64"),
         ]
         table_ref = bigquery.Table(table_id, schema=schema)
         table = self._bq_client.create_table(table_ref)
@@ -483,7 +485,7 @@ def batch_search(
         )

         if queries is not None:
-            embeddings = self.embedding.embed_documents(queries)
+            embeddings = [self.embedding.embed_query(query) for query in queries]

         if embeddings is None:
             raise ValueError("Could not obtain embeddings - value is None.")
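Two behavioural notes on this file, shown as a minimal, self-contained sketch rather than the library's own code: the temporary table now carries an explicit row_num column written at upload time, so the search query no longer depends on ROW_NUMBER() OVER(), which has no deterministic ordering without an ORDER BY; and batch_search now embeds each query with embed_query instead of a single embed_documents call, matching the query/document split in the LangChain Embeddings interface. FakeEmbeddings and the literal "embedding" column name below are hypothetical stand-ins.

# Illustrative sketch only: FakeEmbeddings and the "embedding" column name are
# stand-ins for the real embedding model and embedding_field.
from typing import List

import pandas as pd


class FakeEmbeddings:
    """Toy embedder mirroring the embed_documents / embed_query split."""

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        # Document-style embeddings (what batch_search used before this commit).
        return [[float(len(t)), 0.0] for t in texts]

    def embed_query(self, text: str) -> List[float]:
        # Query-style embedding; some models treat queries differently.
        return [float(len(text)), 1.0]


embedding = FakeEmbeddings()
queries = ["what is bigquery?", "vector search"]

# After the change: one embed_query call per query.
embeddings = [embedding.embed_query(query) for query in queries]

# The temp-table frame now carries an explicit row_num column, giving each
# embedding a stable key instead of relying on ROW_NUMBER() OVER().
df = pd.DataFrame([])
df["embedding"] = embeddings
df["row_num"] = list(range(len(df)))
print(df)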

libs/genai/langchain_google_genai/_function_utils.py

Lines changed: 0 additions & 2 deletions
@@ -342,8 +342,6 @@ def _get_items_from_schema(schema: Union[Dict, List, str]) -> Dict[str, Any]:
     items["type_"] = _get_type_from_schema(schema)
     if items["type_"] == glm.Type.OBJECT and "properties" in schema:
         items["properties"] = _get_properties_from_schema_any(schema["properties"])
-    if "title" in schema:
-        items["title"] = schema
     if "title" in schema or "description" in schema:
         items["description"] = (
             schema.get("description") or schema.get("title") or ""
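The deleted branch copied the entire schema object into items["title"] rather than the title string; with it gone, a schema's title only feeds the description fallback that remains. A toy illustration of that surviving fallback, using a made-up schema dict:

# Hypothetical schema fragment; only the description-or-title fallback kept in
# the code above is exercised here.
schema = {"type": "string", "title": "User name"}

description = schema.get("description") or schema.get("title") or ""
assert description == "User name"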

libs/genai/langchain_google_genai/chat_models.py

Lines changed: 5 additions & 1 deletion
@@ -1461,7 +1461,11 @@ def create_cached_content(

     @property
     def _supports_tool_choice(self) -> bool:
-        return "gemini-1.5-pro" in self.model or "gemini-1.5-flash" in self.model
+        return (
+            "gemini-1.5-pro" in self.model
+            or "gemini-1.5-flash" in self.model
+            or "gemini-2" in self.model
+        )


 def _get_tool_name(
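The property now also reports tool-choice support for Gemini 2 model names. A standalone restatement of the widened substring check, for illustration only (the helper name is made up):

def supports_tool_choice(model: str) -> bool:
    # Same substring test as the property above, outside the class for clarity.
    return (
        "gemini-1.5-pro" in model
        or "gemini-1.5-flash" in model
        or "gemini-2" in model
    )


assert supports_tool_choice("models/gemini-2.0-flash-exp")
assert not supports_tool_choice("models/gemini-1.0-pro")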

libs/genai/pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "langchain-google-genai"
-version = "2.0.6"
+version = "2.0.7"
 description = "An integration package connecting Google's genai package and LangChain"
 authors = []
 readme = "README.md"

libs/genai/tests/integration_tests/test_standard.py

Lines changed: 30 additions & 0 deletions
@@ -11,6 +11,36 @@
 from langchain_google_genai import ChatGoogleGenerativeAI

 rate_limiter = InMemoryRateLimiter(requests_per_second=0.25)
+rate_limiter_2_0 = InMemoryRateLimiter(requests_per_second=0.1)
+
+
+class TestGeminiAI2Standard(ChatModelIntegrationTests):
+    @property
+    def chat_model_class(self) -> Type[BaseChatModel]:
+        return ChatGoogleGenerativeAI
+
+    @property
+    def chat_model_params(self) -> dict:
+        return {
+            "model": "models/gemini-2.0-flash-exp",
+            "rate_limiter": rate_limiter_2_0,
+        }
+
+    @pytest.mark.xfail(reason="with_structured_output with JSON schema not supported.")
+    async def test_structured_output_async(self, model: BaseChatModel) -> None:
+        await super().test_structured_output_async(model)
+
+    @pytest.mark.xfail(reason="with_structured_output with JSON schema not supported.")
+    def test_structured_output(self, model: BaseChatModel) -> None:
+        super().test_structured_output(model)
+
+    @pytest.mark.xfail(reason="with_structured_output with JSON schema not supported.")
+    def test_structured_output_pydantic_2_v1(self, model: BaseChatModel) -> None:
+        super().test_structured_output_pydantic_2_v1(model)
+
+    @pytest.mark.xfail(reason="investigate")
+    def test_bind_runnables_as_tools(self, model: BaseChatModel) -> None:
+        super().test_bind_runnables_as_tools(model)


 class TestGeminiAIStandard(ChatModelIntegrationTests):
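The new test class points the standard integration suite at the experimental Gemini 2.0 Flash model with a lower-rate limiter; the suite builds the model under test from chat_model_class and chat_model_params. A rough, hand-written equivalent of that construction, assuming a GOOGLE_API_KEY is available in the environment:

from langchain_core.rate_limiters import InMemoryRateLimiter
from langchain_google_genai import ChatGoogleGenerativeAI

# Requires GOOGLE_API_KEY to be set; kwargs mirror chat_model_params above.
rate_limiter_2_0 = InMemoryRateLimiter(requests_per_second=0.1)
llm = ChatGoogleGenerativeAI(
    model="models/gemini-2.0-flash-exp",
    rate_limiter=rate_limiter_2_0,
)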

libs/vertexai/langchain_google_vertexai/_utils.py

Lines changed: 5 additions & 1 deletion
@@ -131,7 +131,11 @@ def _missing_(cls, value: Any) -> "GoogleModelFamily":
             "medlm-large-1.5-001",
             "medlm-large-1.5@001",
         }
-        if "gemini-1.5" in model_name or model_name in gemini_advanced_models:
+        if (
+            "gemini-1.5" in model_name
+            or model_name in gemini_advanced_models
+            or "gemini-2" in model_name
+        ):
             return GoogleModelFamily.GEMINI_ADVANCED
         if "gemini" in model_name:
             return GoogleModelFamily.GEMINI
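With this change, Gemini 2 model names are classified into the GEMINI_ADVANCED family alongside Gemini 1.5 and the listed MedLM models. A toy, self-contained version of the predicate (the function name and the trimmed model set are illustrative only):

GEMINI_ADVANCED_MODELS = {"medlm-large-1.5-001", "medlm-large-1.5@001"}


def is_gemini_advanced(model_name: str) -> bool:
    # Same three-way check as _missing_ above, outside the enum for clarity.
    return (
        "gemini-1.5" in model_name
        or model_name in GEMINI_ADVANCED_MODELS
        or "gemini-2" in model_name
    )


assert is_gemini_advanced("gemini-2.0-flash-exp")
assert is_gemini_advanced("gemini-1.5-pro-002")
assert not is_gemini_advanced("gemini-1.0-pro")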

libs/vertexai/pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "langchain-google-vertexai"
-version = "2.0.8"
+version = "2.0.9"
 description = "An integration package connecting Google VertexAI and LangChain"
 authors = []
 readme = "README.md"

libs/vertexai/tests/integration_tests/test_standard.py

Lines changed: 27 additions & 0 deletions
@@ -12,6 +12,33 @@
 rate_limiter = InMemoryRateLimiter(requests_per_second=0.5)


+@pytest.mark.first
+class TestGemini2AIStandard(ChatModelIntegrationTests):
+    @property
+    def chat_model_class(self) -> Type[BaseChatModel]:
+        return ChatVertexAI
+
+    @property
+    def chat_model_params(self) -> dict:
+        return {
+            "model_name": "gemini-2.0-flash-exp",
+            "rate_limiter": rate_limiter,
+            "temperature": 0,
+        }
+
+    @property
+    def supports_image_inputs(self) -> bool:
+        return True
+
+    @property
+    def supports_video_inputs(self) -> bool:
+        return True
+
+    @property
+    def supports_audio_inputs(self) -> bool:
+        return True
+
+
 @pytest.mark.first
 class TestGeminiAIStandard(ChatModelIntegrationTests):
     @property
