diff --git a/.github/workflows/schedule_reporter.yml b/.github/workflows/schedule_reporter.yml new file mode 100644 index 00000000..3549f373 --- /dev/null +++ b/.github/workflows/schedule_reporter.yml @@ -0,0 +1,25 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +name: Schedule Reporter + +on: + schedule: + - cron: '0 6 * * *' # Runs at 6 AM every morning + +jobs: + run_reporter: + uses: googleapis/langchain-google-alloydb-pg-python/.github/workflows/cloud_build_failure_reporter.yml@main + with: + trigger_names: "pg-integration-test-nightly,pg-continuous-test-on-merge" diff --git a/CHANGELOG.md b/CHANGELOG.md index d75d9a9e..3ac84353 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,12 @@ # Changelog +## [0.12.0](https://github.com/googleapis/langchain-google-cloud-sql-pg-python/compare/v0.11.1...v0.12.0) (2025-01-06) + + +### Features + +* Add engine_args argument to engine creation functions ([#242](https://github.com/googleapis/langchain-google-cloud-sql-pg-python/issues/242)) ([5f2f7b7](https://github.com/googleapis/langchain-google-cloud-sql-pg-python/commit/5f2f7b7754824fde23867137e32208ea276be43c)) + ## [0.11.1](https://github.com/googleapis/langchain-google-cloud-sql-pg-python/compare/v0.11.0...v0.11.1) (2024-11-15) diff --git a/samples/index_tuning_sample/README.md b/samples/index_tuning_sample/README.md index 16556915..2163bca4 100644 --- a/samples/index_tuning_sample/README.md +++ b/samples/index_tuning_sample/README.md @@ -174,7 +174,7 @@ class HNSWIndex( index_type: str = "hnsw", # Distance strategy does not affect recall and has minimal little on latency; refer to this guide to learn more https://cloud.google.com/spanner/docs/choose-vector-distance-function distance_strategy: DistanceStrategy = lambda : DistanceStrategy.COSINE_DISTANCE, - partial_indexes: List[str] | None = None, + partial_indexes: list[str] | None = None, m: int = 16, ef_construction: int = 64 ) @@ -222,7 +222,7 @@ class IVFFlatIndex( name: str = DEFAULT_INDEX_NAME, index_type: str = "ivfflat", distance_strategy: DistanceStrategy = lambda : DistanceStrategy.COSINE_DISTANCE, - partial_indexes: List[str] | None = None, + partial_indexes: list[str] | None = None, lists: int = 1 ) diff --git a/samples/index_tuning_sample/requirements.txt b/samples/index_tuning_sample/requirements.txt index de611708..0f055624 100644 --- a/samples/index_tuning_sample/requirements.txt +++ b/samples/index_tuning_sample/requirements.txt @@ -1,3 +1,3 @@ -langchain-community==0.3.1 +langchain-community==0.2.19 langchain-google-cloud-sql-pg==0.10.0 langchain-google-vertexai==2.0.0 diff --git a/samples/langchain_on_vertexai/prebuilt_langchain_agent_template.py b/samples/langchain_on_vertexai/prebuilt_langchain_agent_template.py index f8c6ca71..70fefcd9 100644 --- a/samples/langchain_on_vertexai/prebuilt_langchain_agent_template.py +++ b/samples/langchain_on_vertexai/prebuilt_langchain_agent_template.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations 
under the License. import os -from typing import List import vertexai # type: ignore from config import ( @@ -38,14 +37,14 @@ engine = None # Use global variable to share connection pooling -def similarity_search(query: str) -> List[Document]: +def similarity_search(query: str) -> list[Document]: """Searches and returns movies. Args: query: The user query to search for related items Returns: - List[Document]: A list of Documents + list[Document]: A list of Documents """ global engine if not engine: # Reuse connection pool diff --git a/samples/langchain_on_vertexai/requirements.txt b/samples/langchain_on_vertexai/requirements.txt index 818c8021..001d3bbe 100644 --- a/samples/langchain_on_vertexai/requirements.txt +++ b/samples/langchain_on_vertexai/requirements.txt @@ -1,5 +1,5 @@ google-cloud-aiplatform[reasoningengine,langchain]==1.69.0 google-cloud-resource-manager==1.12.5 -langchain-community==0.2.16 +langchain-community==0.2.19 langchain-google-cloud-sql-pg==0.10.0 langchain-google-vertexai==1.0.10 diff --git a/samples/requirements.txt b/samples/requirements.txt index 818c8021..001d3bbe 100644 --- a/samples/requirements.txt +++ b/samples/requirements.txt @@ -1,5 +1,5 @@ google-cloud-aiplatform[reasoningengine,langchain]==1.69.0 google-cloud-resource-manager==1.12.5 -langchain-community==0.2.16 +langchain-community==0.2.19 langchain-google-cloud-sql-pg==0.10.0 langchain-google-vertexai==1.0.10 diff --git a/src/langchain_google_cloud_sql_pg/async_chat_message_history.py b/src/langchain_google_cloud_sql_pg/async_chat_message_history.py index a7873aae..68e15494 100644 --- a/src/langchain_google_cloud_sql_pg/async_chat_message_history.py +++ b/src/langchain_google_cloud_sql_pg/async_chat_message_history.py @@ -15,7 +15,7 @@ from __future__ import annotations import json -from typing import List, Sequence +from typing import Sequence from langchain_core.chat_history import BaseChatMessageHistory from langchain_core.messages import BaseMessage, messages_from_dict @@ -128,7 +128,7 @@ async def aclear(self) -> None: await conn.execute(text(query), {"session_id": self.session_id}) await conn.commit() - async def _aget_messages(self) -> List[BaseMessage]: + async def _aget_messages(self) -> list[BaseMessage]: """Retrieve the messages from PostgreSQL.""" query = f"""SELECT data, type FROM "{self.schema_name}"."{self.table_name}" WHERE session_id = :session_id ORDER BY id;""" async with self.pool.connect() as conn: diff --git a/src/langchain_google_cloud_sql_pg/async_loader.py b/src/langchain_google_cloud_sql_pg/async_loader.py index 90e94526..45e14116 100644 --- a/src/langchain_google_cloud_sql_pg/async_loader.py +++ b/src/langchain_google_cloud_sql_pg/async_loader.py @@ -15,7 +15,7 @@ from __future__ import annotations import json -from typing import Any, AsyncIterator, Callable, Dict, Iterable, List, Optional +from typing import Any, AsyncIterator, Callable, Iterable, Optional from langchain_core.document_loaders.base import BaseLoader from langchain_core.documents import Document @@ -28,24 +28,24 @@ DEFAULT_METADATA_COL = "langchain_metadata" -def text_formatter(row: dict, content_columns: List[str]) -> str: +def text_formatter(row: dict, content_columns: list[str]) -> str: """txt document formatter.""" return " ".join(str(row[column]) for column in content_columns if column in row) -def csv_formatter(row: dict, content_columns: List[str]) -> str: +def csv_formatter(row: dict, content_columns: list[str]) -> str: """CSV document formatter.""" return ", ".join(str(row[column]) for column in 
content_columns if column in row) -def yaml_formatter(row: dict, content_columns: List[str]) -> str: +def yaml_formatter(row: dict, content_columns: list[str]) -> str: """YAML document formatter.""" return "\n".join( f"{column}: {str(row[column])}" for column in content_columns if column in row ) -def json_formatter(row: dict, content_columns: List[str]) -> str: +def json_formatter(row: dict, content_columns: list[str]) -> str: """JSON document formatter.""" dictionary = {} for column in content_columns: @@ -63,7 +63,7 @@ def _parse_doc_from_row( ) -> Document: """Parse row into document.""" page_content = formatter(row, content_columns) - metadata: Dict[str, Any] = {} + metadata: dict[str, Any] = {} # unnest metadata from langchain_metadata column if metadata_json_column and row.get(metadata_json_column): for k, v in row[metadata_json_column].items(): @@ -81,10 +81,10 @@ def _parse_row_from_doc( column_names: Iterable[str], content_column: str = DEFAULT_CONTENT_COL, metadata_json_column: Optional[str] = DEFAULT_METADATA_COL, -) -> Dict: +) -> dict: """Parse document into a dictionary of rows.""" doc_metadata = doc.metadata.copy() - row: Dict[str, Any] = {content_column: doc.page_content} + row: dict[str, Any] = {content_column: doc.page_content} for entry in doc.metadata: if entry in column_names: row[entry] = doc_metadata[entry] @@ -111,8 +111,8 @@ def __init__( key: object, pool: AsyncEngine, query: str, - content_columns: List[str], - metadata_columns: List[str], + content_columns: list[str], + metadata_columns: list[str], formatter: Callable, metadata_json_column: Optional[str] = None, ) -> None: @@ -122,8 +122,8 @@ def __init__( key (object): Prevent direct constructor usage. engine (PostgresEngine): AsyncEngine with pool connection to the postgres database query (Optional[str], optional): SQL query. Defaults to None. - content_columns (Optional[List[str]], optional): Column that represent a Document's page_content. Defaults to the first column. - metadata_columns (Optional[List[str]], optional): Column(s) that represent a Document's metadata. Defaults to None. + content_columns (Optional[list[str]], optional): Column that represent a Document's page_content. Defaults to the first column. + metadata_columns (Optional[list[str]], optional): Column(s) that represent a Document's metadata. Defaults to None. formatter (Optional[Callable], optional): A function to format page content (OneOf: format, formatter). Defaults to None. metadata_json_column (Optional[str], optional): Column to store metadata as JSON. Defaults to "langchain_metadata". @@ -150,8 +150,8 @@ async def create( query: Optional[str] = None, table_name: Optional[str] = None, schema_name: str = "public", - content_columns: Optional[List[str]] = None, - metadata_columns: Optional[List[str]] = None, + content_columns: Optional[list[str]] = None, + metadata_columns: Optional[list[str]] = None, metadata_json_column: Optional[str] = None, format: Optional[str] = None, formatter: Optional[Callable] = None, @@ -163,8 +163,8 @@ async def create( query (Optional[str], optional): SQL query. Defaults to None. table_name (Optional[str], optional): Name of table to query. Defaults to None. schema_name (str, optional): Database schema name of the table. Defaults to "public". - content_columns (Optional[List[str]], optional): Column that represent a Document's page_content. Defaults to the first column. - metadata_columns (Optional[List[str]], optional): Column(s) that represent a Document's metadata. Defaults to None. 
+ content_columns (Optional[list[str]], optional): Column that represent a Document's page_content. Defaults to the first column. + metadata_columns (Optional[list[str]], optional): Column(s) that represent a Document's metadata. Defaults to None. metadata_json_column (Optional[str], optional): Column to store metadata as JSON. Defaults to "langchain_metadata". format (Optional[str], optional): Format of page content (OneOf: text, csv, YAML, JSON). Defaults to 'text'. formatter (Optional[Callable], optional): A function to format page content (OneOf: format, formatter). Defaults to None. @@ -236,7 +236,7 @@ async def create( metadata_json_column, ) - async def aload(self) -> List[Document]: + async def aload(self) -> list[Document]: """Load PostgreSQL data into Document objects.""" return [doc async for doc in self.alazy_load()] @@ -280,7 +280,7 @@ def __init__( table_name: str, content_column: str, schema_name: str = "public", - metadata_columns: List[str] = [], + metadata_columns: list[str] = [], metadata_json_column: Optional[str] = None, ): """AsyncPostgresDocumentSaver constructor. @@ -289,9 +289,9 @@ def __init__( key (object): Prevent direct constructor usage. engine (PostgresEngine): AsyncEngine with pool connection to the postgres database table_name (Optional[str], optional): Name of table to query. Defaults to None. - content_columns (Optional[List[str]], optional): Column that represent a Document's page_content. Defaults to the first column. + content_columns (Optional[list[str]], optional): Column that represent a Document's page_content. Defaults to the first column. schema_name (str, optional): Database schema name of the table. Defaults to "public". - metadata_columns (Optional[List[str]], optional): Column(s) that represent a Document's metadata. Defaults to None. + metadata_columns (Optional[list[str]], optional): Column(s) that represent a Document's metadata. Defaults to None. metadata_json_column (Optional[str], optional): Column to store metadata as JSON. Defaults to "langchain_metadata". Raises: @@ -315,7 +315,7 @@ async def create( table_name: str, schema_name: str = "public", content_column: str = DEFAULT_CONTENT_COL, - metadata_columns: List[str] = [], + metadata_columns: list[str] = [], metadata_json_column: Optional[str] = DEFAULT_METADATA_COL, ) -> AsyncPostgresDocumentSaver: """Create an AsyncPostgresDocumentSaver instance. @@ -323,8 +323,8 @@ async def create( Args: engine (PostgresEngine):AsyncEngine with pool connection to the postgres database table_name (Optional[str], optional): Name of table to query. Defaults to None. - content_columns (Optional[List[str]], optional): Column that represent a Document's page_content. Defaults to the first column. - metadata_columns (Optional[List[str]], optional): Column(s) that represent a Document's metadata. Defaults to None. + content_columns (Optional[list[str]], optional): Column that represent a Document's page_content. Defaults to the first column. + metadata_columns (Optional[list[str]], optional): Column(s) that represent a Document's metadata. Defaults to None. metadata_json_column (Optional[str], optional): Column to store metadata as JSON. Defaults to "langchain_metadata". Returns: @@ -367,13 +367,13 @@ async def create( metadata_json_column, ) - async def aadd_documents(self, docs: List[Document]) -> None: + async def aadd_documents(self, docs: list[Document]) -> None: """ Save documents in the DocumentSaver table. 
Document’s metadata is added to columns if found or stored in langchain_metadata JSON column. Args: - docs (List[langchain_core.documents.Document]): a list of documents to be saved. + docs (list[langchain_core.documents.Document]): a list of documents to be saved. """ for doc in docs: @@ -411,13 +411,13 @@ async def aadd_documents(self, docs: List[Document]) -> None: await conn.execute(text(query), row) await conn.commit() - async def adelete(self, docs: List[Document]) -> None: + async def adelete(self, docs: list[Document]) -> None: """ Delete all instances of a document from the DocumentSaver table by matching the entire Document object. Args: - docs (List[langchain_core.documents.Document]): a list of documents to be deleted. + docs (list[langchain_core.documents.Document]): a list of documents to be deleted. """ for doc in docs: row = _parse_row_from_doc( diff --git a/src/langchain_google_cloud_sql_pg/async_vectorstore.py b/src/langchain_google_cloud_sql_pg/async_vectorstore.py index fcf92dbf..a3258c85 100644 --- a/src/langchain_google_cloud_sql_pg/async_vectorstore.py +++ b/src/langchain_google_cloud_sql_pg/async_vectorstore.py @@ -17,7 +17,7 @@ import json import uuid -from typing import Any, Callable, Iterable, List, Optional, Sequence, Tuple, Type +from typing import Any, Callable, Iterable, Optional, Sequence import numpy as np from langchain_core.documents import Document @@ -52,7 +52,7 @@ def __init__( schema_name: str = "public", content_column: str = "content", embedding_column: str = "embedding", - metadata_columns: List[str] = [], + metadata_columns: list[str] = [], id_column: str = "langchain_id", metadata_json_column: Optional[str] = "langchain_metadata", distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, @@ -70,7 +70,7 @@ def __init__( schema_name (str, optional): Database schema name of the table. Defaults to "public". content_column (str): Column that represent a Document's page_content. Defaults to "content". embedding_column (str): Column for embedding vectors. The embedding is generated from the document value. Defaults to "embedding". - metadata_columns (List[str]): Column(s) that represent a document's metadata. + metadata_columns (list[str]): Column(s) that represent a document's metadata. id_column (str): Column that represents the Document's id. Defaults to "langchain_id". metadata_json_column (str): Column to store metadata as JSON. Defaults to "langchain_metadata". distance_strategy (DistanceStrategy): Distance strategy to use for vector similarity search. Defaults to COSINE_DISTANCE. @@ -112,8 +112,8 @@ async def create( schema_name: str = "public", content_column: str = "content", embedding_column: str = "embedding", - metadata_columns: List[str] = [], - ignore_metadata_columns: Optional[List[str]] = None, + metadata_columns: list[str] = [], + ignore_metadata_columns: Optional[list[str]] = None, id_column: str = "langchain_id", metadata_json_column: Optional[str] = "langchain_metadata", distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, @@ -131,8 +131,8 @@ async def create( schema_name (str, optional): Database schema name of the table. Defaults to "public". content_column (str): Column that represent a Document's page_content. Defaults to "content". embedding_column (str): Column for embedding vectors. The embedding is generated from the document value. Defaults to "embedding". - metadata_columns (List[str]): Column(s) that represent a document's metadata. 
- ignore_metadata_columns (List[str]): Column(s) to ignore in pre-existing tables for a document's metadata. Can not be used with metadata_columns. Defaults to None. + metadata_columns (list[str]): Column(s) that represent a document's metadata. + ignore_metadata_columns (list[str]): Column(s) to ignore in pre-existing tables for a document's metadata. Can not be used with metadata_columns. Defaults to None. id_column (str): Column that represents the Document's id. Defaults to "langchain_id". metadata_json_column (str): Column to store metadata as JSON. Defaults to "langchain_metadata". distance_strategy (DistanceStrategy): Distance strategy to use for vector similarity search. Defaults to COSINE_DISTANCE. @@ -224,11 +224,11 @@ def embeddings(self) -> Embeddings: async def __aadd_embeddings( self, texts: Iterable[str], - embeddings: List[List[float]], - metadatas: Optional[List[dict]] = None, - ids: Optional[List] = None, + embeddings: list[list[float]], + metadatas: Optional[list[dict]] = None, + ids: Optional[list] = None, **kwargs: Any, - ) -> List[str]: + ) -> list[str]: """Add embeddings to the table. Raises: @@ -279,10 +279,10 @@ async def __aadd_embeddings( async def aadd_texts( self, texts: Iterable[str], - metadatas: Optional[List[dict]] = None, - ids: Optional[List] = None, + metadatas: Optional[list[dict]] = None, + ids: Optional[list] = None, **kwargs: Any, - ) -> List[str]: + ) -> list[str]: """Embed texts and add to the table. Raises: @@ -296,10 +296,10 @@ async def aadd_texts( async def aadd_documents( self, - documents: List[Document], - ids: Optional[List] = None, + documents: list[Document], + ids: Optional[list] = None, **kwargs: Any, - ) -> List[str]: + ) -> list[str]: """Embed documents and add to the table. Raises: @@ -312,7 +312,7 @@ async def aadd_documents( async def adelete( self, - ids: Optional[List] = None, + ids: Optional[list] = None, **kwargs: Any, ) -> Optional[bool]: """Delete records from the table. @@ -332,18 +332,18 @@ async def adelete( @classmethod async def afrom_texts( # type: ignore[override] - cls: Type[AsyncPostgresVectorStore], - texts: List[str], + cls: type[AsyncPostgresVectorStore], + texts: list[str], embedding: Embeddings, engine: PostgresEngine, table_name: str, schema_name: str = "public", - metadatas: Optional[List[dict]] = None, - ids: Optional[List] = None, + metadatas: Optional[list[dict]] = None, + ids: Optional[list] = None, content_column: str = "content", embedding_column: str = "embedding", - metadata_columns: List[str] = [], - ignore_metadata_columns: Optional[List[str]] = None, + metadata_columns: list[str] = [], + ignore_metadata_columns: Optional[list[str]] = None, id_column: str = "langchain_id", metadata_json_column: str = "langchain_metadata", distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, @@ -356,17 +356,17 @@ async def afrom_texts( # type: ignore[override] """Create an AsyncPostgresVectorStore instance from texts. Args: - texts (List[str]): Texts to add to the vector store. + texts (list[str]): Texts to add to the vector store. embedding (Embeddings): Text embedding model to use. engine (PostgresEngine): Connection pool engine for managing connections to Postgres database. table_name (str): Name of the existing table or the table to be created. schema_name (str, optional): Database schema name of the table. Defaults to "public". - metadatas (Optional[List[dict]]): List of metadatas to add to table records. - ids: (Optional[List[str]]): List of IDs to add to table records. 
+ metadatas (Optional[list[dict]]): List of metadatas to add to table records. + ids: (Optional[list[str]]): List of IDs to add to table records. content_column (str): Column that represent a Document’s page_content. Defaults to "content". embedding_column (str): Column for embedding vectors. The embedding is generated from the document value. Defaults to "embedding". - metadata_columns (List[str]): Column(s) that represent a document's metadata. - ignore_metadata_columns (List[str]): Column(s) to ignore in pre-existing tables for a document's metadata. Can not be used with metadata_columns. Defaults to None. + metadata_columns (list[str]): Column(s) that represent a document's metadata. + ignore_metadata_columns (list[str]): Column(s) to ignore in pre-existing tables for a document's metadata. Can not be used with metadata_columns. Defaults to None. id_column (str): Column that represents the Document's id. Defaults to "langchain_id". metadata_json_column (str): Column to store metadata as JSON. Defaults to "langchain_metadata". distance_strategy (DistanceStrategy): Distance strategy to use for vector similarity search. Defaults to COSINE_DISTANCE. @@ -403,17 +403,17 @@ async def afrom_texts( # type: ignore[override] @classmethod async def afrom_documents( # type: ignore[override] - cls: Type[AsyncPostgresVectorStore], - documents: List[Document], + cls: type[AsyncPostgresVectorStore], + documents: list[Document], embedding: Embeddings, engine: PostgresEngine, table_name: str, schema_name: str = "public", - ids: Optional[List] = None, + ids: Optional[list] = None, content_column: str = "content", embedding_column: str = "embedding", - metadata_columns: List[str] = [], - ignore_metadata_columns: Optional[List[str]] = None, + metadata_columns: list[str] = [], + ignore_metadata_columns: Optional[list[str]] = None, id_column: str = "langchain_id", metadata_json_column: str = "langchain_metadata", distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, @@ -426,17 +426,17 @@ async def afrom_documents( # type: ignore[override] """Create an AsyncPostgresVectorStore instance from documents. Args: - documents (List[Document]): Documents to add to the vector store. + documents (list[Document]): Documents to add to the vector store. embedding (Embeddings): Text embedding model to use. engine (PostgresEngine): Connection pool engine for managing connections to Postgres database. table_name (str): Name of the existing table or the table to be created. schema_name (str, optional): Database schema name of the table. Defaults to "public". - metadatas (Optional[List[dict]]): List of metadatas to add to table records. - ids: (Optional[List[str]]): List of IDs to add to table records. + metadatas (Optional[list[dict]]): List of metadatas to add to table records. + ids: (Optional[list[str]]): List of IDs to add to table records. content_column (str): Column that represent a Document's page_content. Defaults to "content". embedding_column (str): Column for embedding vectors. The embedding is generated from the document value. Defaults to "embedding". - metadata_columns (List[str]): Column(s) that represent a document's metadata. - ignore_metadata_columns (List[str]): Column(s) to ignore in pre-existing tables for a document's metadata. Can not be used with metadata_columns. Defaults to None. + metadata_columns (list[str]): Column(s) that represent a document's metadata. + ignore_metadata_columns (list[str]): Column(s) to ignore in pre-existing tables for a document's metadata. 
Can not be used with metadata_columns. Defaults to None. id_column (str): Column that represents the Document's id. Defaults to "langchain_id". metadata_json_column (str): Column to store metadata as JSON. Defaults to "langchain_metadata". distance_strategy (DistanceStrategy): Distance strategy to use for vector similarity search. Defaults to COSINE_DISTANCE. @@ -475,7 +475,7 @@ async def afrom_documents( # type: ignore[override] async def __query_collection( self, - embedding: List[float], + embedding: list[float], k: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any, @@ -508,7 +508,7 @@ async def asimilarity_search( k: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Document]: + ) -> list[Document]: """Return docs selected by similarity search on query.""" embedding = self.embedding_service.embed_query(text=query) @@ -533,7 +533,7 @@ async def asimilarity_search_with_score( k: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Tuple[Document, float]]: + ) -> list[tuple[Document, float]]: """Return docs and distance scores selected by similarity search on query.""" embedding = self.embedding_service.embed_query(query) docs = await self.asimilarity_search_with_score_by_vector( @@ -543,11 +543,11 @@ async def asimilarity_search_with_score( async def asimilarity_search_by_vector( self, - embedding: List[float], + embedding: list[float], k: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Document]: + ) -> list[Document]: """Return docs selected by vector similarity search.""" docs_and_scores = await self.asimilarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter, **kwargs @@ -557,11 +557,11 @@ async def asimilarity_search_by_vector( async def asimilarity_search_with_score_by_vector( self, - embedding: List[float], + embedding: list[float], k: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Tuple[Document, float]]: + ) -> list[tuple[Document, float]]: """Return docs and distance scores selected by vector similarity search.""" results = await self.__query_collection( embedding=embedding, k=k, filter=filter, **kwargs @@ -596,7 +596,7 @@ async def amax_marginal_relevance_search( lambda_mult: Optional[float] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Document]: + ) -> list[Document]: """Return docs selected using the maximal marginal relevance.""" embedding = self.embedding_service.embed_query(text=query) @@ -611,13 +611,13 @@ async def amax_marginal_relevance_search( async def amax_marginal_relevance_search_by_vector( self, - embedding: List[float], + embedding: list[float], k: Optional[int] = None, fetch_k: Optional[int] = None, lambda_mult: Optional[float] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Document]: + ) -> list[Document]: """Return docs selected using the maximal marginal relevance.""" docs_and_scores = ( await self.amax_marginal_relevance_search_with_score_by_vector( @@ -634,13 +634,13 @@ async def amax_marginal_relevance_search_by_vector( async def amax_marginal_relevance_search_with_score_by_vector( self, - embedding: List[float], + embedding: list[float], k: Optional[int] = None, fetch_k: Optional[int] = None, lambda_mult: Optional[float] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Tuple[Document, float]]: + ) -> list[tuple[Document, float]]: """Return docs and distance scores selected using the maximal marginal relevance.""" results = await 
self.__query_collection( embedding=embedding, k=fetch_k, filter=filter, **kwargs @@ -749,7 +749,7 @@ def similarity_search( k: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Document]: + ) -> list[Document]: raise NotImplementedError( "Sync methods are not implemented for AsyncPostgresVectorStore. Use PostgresVectorStore interface instead." ) @@ -757,27 +757,27 @@ def similarity_search( def add_texts( self, texts: Iterable[str], - metadatas: Optional[List[dict]] = None, - ids: Optional[List] = None, + metadatas: Optional[list[dict]] = None, + ids: Optional[list] = None, **kwargs: Any, - ) -> List[str]: + ) -> list[str]: raise NotImplementedError( "Sync methods are not implemented for AsyncPostgresVectorStore. Use PostgresVectorStore interface instead." ) def add_documents( self, - documents: List[Document], - ids: Optional[List] = None, + documents: list[Document], + ids: Optional[list] = None, **kwargs: Any, - ) -> List[str]: + ) -> list[str]: raise NotImplementedError( "Sync methods are not implemented for AsyncPostgresVectorStore. Use PostgresVectorStore interface instead." ) def delete( self, - ids: Optional[List] = None, + ids: Optional[list] = None, **kwargs: Any, ) -> Optional[bool]: raise NotImplementedError( @@ -786,17 +786,17 @@ def delete( @classmethod def from_texts( # type: ignore[override] - cls: Type[AsyncPostgresVectorStore], - texts: List[str], + cls: type[AsyncPostgresVectorStore], + texts: list[str], embedding: Embeddings, engine: PostgresEngine, table_name: str, - metadatas: Optional[List[dict]] = None, - ids: Optional[List] = None, + metadatas: Optional[list[dict]] = None, + ids: Optional[list] = None, content_column: str = "content", embedding_column: str = "embedding", - metadata_columns: List[str] = [], - ignore_metadata_columns: Optional[List[str]] = None, + metadata_columns: list[str] = [], + ignore_metadata_columns: Optional[list[str]] = None, id_column: str = "langchain_id", metadata_json_column: str = "langchain_metadata", **kwargs: Any, @@ -807,16 +807,16 @@ def from_texts( # type: ignore[override] @classmethod def from_documents( # type: ignore[override] - cls: Type[AsyncPostgresVectorStore], - documents: List[Document], + cls: type[AsyncPostgresVectorStore], + documents: list[Document], embedding: Embeddings, engine: PostgresEngine, table_name: str, - ids: Optional[List] = None, + ids: Optional[list] = None, content_column: str = "content", embedding_column: str = "embedding", - metadata_columns: List[str] = [], - ignore_metadata_columns: Optional[List[str]] = None, + metadata_columns: list[str] = [], + ignore_metadata_columns: Optional[list[str]] = None, id_column: str = "langchain_id", metadata_json_column: str = "langchain_metadata", **kwargs: Any, @@ -831,29 +831,29 @@ def similarity_search_with_score( k: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Tuple[Document, float]]: + ) -> list[tuple[Document, float]]: raise NotImplementedError( "Sync methods are not implemented for AsyncPostgresVectorStore. Use PostgresVectorStore interface instead." ) def similarity_search_by_vector( self, - embedding: List[float], + embedding: list[float], k: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Document]: + ) -> list[Document]: raise NotImplementedError( "Sync methods are not implemented for AsyncPostgresVectorStore. Use PostgresVectorStore interface instead." 
) def similarity_search_with_score_by_vector( self, - embedding: List[float], + embedding: list[float], k: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Tuple[Document, float]]: + ) -> list[tuple[Document, float]]: raise NotImplementedError( "Sync methods are not implemented for AsyncPostgresVectorStore. Use PostgresVectorStore interface instead." ) @@ -866,33 +866,33 @@ def max_marginal_relevance_search( lambda_mult: Optional[float] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Document]: + ) -> list[Document]: raise NotImplementedError( "Sync methods are not implemented for AsyncPostgresVectorStore. Use PostgresVectorStore interface instead." ) def max_marginal_relevance_search_by_vector( self, - embedding: List[float], + embedding: list[float], k: Optional[int] = None, fetch_k: Optional[int] = None, lambda_mult: Optional[float] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Document]: + ) -> list[Document]: raise NotImplementedError( "Sync methods are not implemented for AsyncPostgresVectorStore. Use PostgresVectorStore interface instead." ) def max_marginal_relevance_search_with_score_by_vector( self, - embedding: List[float], + embedding: list[float], k: Optional[int] = None, fetch_k: Optional[int] = None, lambda_mult: Optional[float] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Tuple[Document, float]]: + ) -> list[tuple[Document, float]]: raise NotImplementedError( "Sync methods are not implemented for AsyncPostgresVectorStore. Use PostgresVectorStore interface instead." ) diff --git a/src/langchain_google_cloud_sql_pg/chat_message_history.py b/src/langchain_google_cloud_sql_pg/chat_message_history.py index 306dba15..10f3540d 100644 --- a/src/langchain_google_cloud_sql_pg/chat_message_history.py +++ b/src/langchain_google_cloud_sql_pg/chat_message_history.py @@ -14,7 +14,7 @@ from __future__ import annotations -from typing import List, Sequence +from typing import Sequence from langchain_core.chat_history import BaseChatMessageHistory from langchain_core.messages import BaseMessage, messages_from_dict @@ -107,7 +107,7 @@ def create_sync( return cls(cls.__create_key, engine, history) @property # type: ignore[override] - def messages(self) -> List[BaseMessage]: + def messages(self) -> list[BaseMessage]: """The abstraction required a property.""" return self._engine._run_as_sync(self._history._aget_messages()) diff --git a/src/langchain_google_cloud_sql_pg/engine.py b/src/langchain_google_cloud_sql_pg/engine.py index 33b7a83f..1fc30815 100644 --- a/src/langchain_google_cloud_sql_pg/engine.py +++ b/src/langchain_google_cloud_sql_pg/engine.py @@ -18,7 +18,7 @@ from concurrent.futures import Future from dataclasses import dataclass from threading import Thread -from typing import TYPE_CHECKING, Any, Awaitable, Dict, List, Optional, TypeVar, Union +from typing import TYPE_CHECKING, Any, Awaitable, Mapping, Optional, TypeVar, Union import aiohttp import google.auth # type: ignore @@ -66,7 +66,7 @@ async def _get_iam_principal_email( url = f"https://oauth2.googleapis.com/tokeninfo?access_token={credentials.token}" async with aiohttp.ClientSession() as client: response = await client.get(url, raise_for_status=True) - response_json: Dict = await response.json() + response_json: dict = await response.json() email = response_json.get("email") if email is None: raise ValueError( @@ -143,6 +143,7 @@ async def _create( thread: Optional[Thread] = None, quota_project: Optional[str] = None, 
iam_account_email: Optional[str] = None, + engine_args: Mapping = {}, ) -> PostgresEngine: """Create a PostgresEngine instance. @@ -158,6 +159,9 @@ async def _create( thread (Optional[Thread]): Thread used to create the engine async. quota_project (Optional[str]): Project that provides quota for API calls. iam_account_email (Optional[str]): IAM service account email. Defaults to None. + engine_args (Mapping): Additional arguments that are passed directly to + :func:`~sqlalchemy.ext.asyncio.create_async_engine`. This can be + used to specify additional parameters to the underlying pool during its creation. Raises: ValueError: If only one of `user` and `password` is specified. @@ -211,6 +215,7 @@ async def getconn() -> asyncpg.Connection: engine = create_async_engine( "postgresql+asyncpg://", async_creator=getconn, + **engine_args, ) return cls(cls.__create_key, engine, loop, thread) @@ -226,6 +231,7 @@ def __start_background_loop( ip_type: Union[str, IPTypes] = IPTypes.PUBLIC, quota_project: Optional[str] = None, iam_account_email: Optional[str] = None, + engine_args: Mapping = {}, ) -> Future: # Running a loop in a background thread allows us to support # async methods from non-async environments @@ -247,6 +253,7 @@ def __start_background_loop( thread=cls._default_thread, quota_project=quota_project, iam_account_email=iam_account_email, + engine_args=engine_args, ) return asyncio.run_coroutine_threadsafe(coro, cls._default_loop) @@ -262,6 +269,7 @@ def from_instance( ip_type: Union[str, IPTypes] = IPTypes.PUBLIC, quota_project: Optional[str] = None, iam_account_email: Optional[str] = None, + engine_args: Mapping = {}, ) -> PostgresEngine: """Create a PostgresEngine from a Postgres instance. @@ -275,6 +283,9 @@ def from_instance( ip_type (Union[str, IPTypes], optional): IP address type. Defaults to IPTypes.PUBLIC. quota_project (Optional[str]): Project that provides quota for API calls. iam_account_email (Optional[str], optional): IAM service account email. Defaults to None. + engine_args (Mapping): Additional arguments that are passed directly to + :func:`~sqlalchemy.ext.asyncio.create_async_engine`. This can be + used to specify additional parameters to the underlying pool during its creation. Returns: PostgresEngine: A newly created PostgresEngine instance. @@ -289,6 +300,7 @@ def from_instance( ip_type, quota_project=quota_project, iam_account_email=iam_account_email, + engine_args=engine_args, ) return future.result() @@ -304,6 +316,7 @@ async def afrom_instance( ip_type: Union[str, IPTypes] = IPTypes.PUBLIC, quota_project: Optional[str] = None, iam_account_email: Optional[str] = None, + engine_args: Mapping = {}, ) -> PostgresEngine: """Create a PostgresEngine from a Postgres instance. @@ -317,6 +330,9 @@ async def afrom_instance( ip_type (Union[str, IPTypes], optional): IP address type. Defaults to IPTypes.PUBLIC. quota_project (Optional[str]): Project that provides quota for API calls. iam_account_email (Optional[str], optional): IAM service account email. Defaults to None. + engine_args (Mapping): Additional arguments that are passed directly to + :func:`~sqlalchemy.ext.asyncio.create_async_engine`. This can be + used to specify additional parameters to the underlying pool during its creation. Returns: PostgresEngine: A newly created PostgresEngine instance. 
@@ -331,6 +347,7 @@ async def afrom_instance( ip_type, quota_project=quota_project, iam_account_email=iam_account_email, + engine_args=engine_args, ) return await asyncio.wrap_future(future) @@ -346,7 +363,7 @@ def from_engine( @classmethod def from_engine_args( cls, - url: Union[str | URL], + url: str | URL, **kwargs: Any, ) -> PostgresEngine: """Create an PostgresEngine instance from arguments. These parameters are pass directly into sqlalchemy's create_async_engine function. @@ -408,7 +425,7 @@ async def _ainit_vectorstore_table( schema_name: str = "public", content_column: str = "content", embedding_column: str = "embedding", - metadata_columns: List[Column] = [], + metadata_columns: list[Column] = [], metadata_json_column: str = "langchain_metadata", id_column: Union[str, Column] = "langchain_id", overwrite_existing: bool = False, @@ -426,7 +443,7 @@ async def _ainit_vectorstore_table( Default: "page_content". embedding_column (str) : Name of the column to store vector embeddings. Default: "embedding". - metadata_columns (List[Column]): A list of Columns to create for custom + metadata_columns (list[Column]): A list of Columns to create for custom metadata. Default: []. Optional. metadata_json_column (str): The column to store extra metadata in JSON format. Default: "langchain_metadata". Optional. @@ -475,7 +492,7 @@ async def ainit_vectorstore_table( schema_name: str = "public", content_column: str = "content", embedding_column: str = "embedding", - metadata_columns: List[Column] = [], + metadata_columns: list[Column] = [], metadata_json_column: str = "langchain_metadata", id_column: Union[str, Column] = "langchain_id", overwrite_existing: bool = False, @@ -493,7 +510,7 @@ async def ainit_vectorstore_table( Default: "page_content". embedding_column (str) : Name of the column to store vector embeddings. Default: "embedding". - metadata_columns (List[Column]): A list of Columns to create for custom + metadata_columns (list[Column]): A list of Columns to create for custom metadata. Default: []. Optional. metadata_json_column (str): The column to store extra metadata in JSON format. Default: "langchain_metadata". Optional. @@ -525,7 +542,7 @@ def init_vectorstore_table( schema_name: str = "public", content_column: str = "content", embedding_column: str = "embedding", - metadata_columns: List[Column] = [], + metadata_columns: list[Column] = [], metadata_json_column: str = "langchain_metadata", id_column: Union[str, Column] = "langchain_id", overwrite_existing: bool = False, @@ -543,7 +560,7 @@ def init_vectorstore_table( Default: "page_content". embedding_column (str) : Name of the column to store vector embeddings. Default: "embedding". - metadata_columns (List[Column]): A list of Columns to create for custom + metadata_columns (list[Column]): A list of Columns to create for custom metadata. Default: []. Optional. metadata_json_column (str): The column to store extra metadata in JSON format. Default: "langchain_metadata". Optional. 
@@ -633,7 +650,7 @@ async def _ainit_document_table( table_name: str, schema_name: str = "public", content_column: str = "page_content", - metadata_columns: List[Column] = [], + metadata_columns: list[Column] = [], metadata_json_column: str = "langchain_metadata", store_metadata: bool = True, ) -> None: @@ -657,7 +674,7 @@ async def ainit_document_table( table_name: str, schema_name: str = "public", content_column: str = "page_content", - metadata_columns: List[Column] = [], + metadata_columns: list[Column] = [], metadata_json_column: str = "langchain_metadata", store_metadata: bool = True, ) -> None: @@ -670,7 +687,7 @@ async def ainit_document_table( Default: "public". content_column (str): Name of the column to store document content. Default: "page_content". - metadata_columns (List[sqlalchemy.Column]): A list of SQLAlchemy Columns + metadata_columns (list[sqlalchemy.Column]): A list of SQLAlchemy Columns to create for custom metadata. Optional. metadata_json_column (str): The column to store extra metadata in JSON format. Default: "langchain_metadata". Optional. @@ -696,7 +713,7 @@ def init_document_table( table_name: str, schema_name: str = "public", content_column: str = "page_content", - metadata_columns: List[Column] = [], + metadata_columns: list[Column] = [], metadata_json_column: str = "langchain_metadata", store_metadata: bool = True, ) -> None: @@ -709,7 +726,7 @@ def init_document_table( Default: "public". content_column (str): Name of the column to store document content. Default: "page_content". - metadata_columns (List[sqlalchemy.Column]): A list of SQLAlchemy Columns + metadata_columns (list[sqlalchemy.Column]): A list of SQLAlchemy Columns to create for custom metadata. Optional. metadata_json_column (str): The column to store extra metadata in JSON format. Default: "langchain_metadata". Optional. diff --git a/src/langchain_google_cloud_sql_pg/indexes.py b/src/langchain_google_cloud_sql_pg/indexes.py index b5616a8c..18d7a740 100644 --- a/src/langchain_google_cloud_sql_pg/indexes.py +++ b/src/langchain_google_cloud_sql_pg/indexes.py @@ -15,7 +15,7 @@ import enum from abc import ABC, abstractmethod from dataclasses import dataclass, field -from typing import List, Optional +from typing import Optional @dataclass @@ -44,7 +44,7 @@ class BaseIndex(ABC): distance_strategy: DistanceStrategy = field( default_factory=lambda: DistanceStrategy.COSINE_DISTANCE ) - partial_indexes: Optional[List[str]] = None + partial_indexes: Optional[list[str]] = None @abstractmethod def index_options(self) -> str: diff --git a/src/langchain_google_cloud_sql_pg/loader.py b/src/langchain_google_cloud_sql_pg/loader.py index b9f14a4b..3dcba638 100644 --- a/src/langchain_google_cloud_sql_pg/loader.py +++ b/src/langchain_google_cloud_sql_pg/loader.py @@ -14,7 +14,7 @@ from __future__ import annotations -from typing import AsyncIterator, Callable, Iterator, List, Optional +from typing import AsyncIterator, Callable, Iterator, Optional from langchain_core.document_loaders.base import BaseLoader from langchain_core.documents import Document @@ -46,8 +46,8 @@ def __init__( key (object): Prevent direct constructor usage. engine (PostgresEngine): AsyncEngine with pool connection to the postgres database query (Optional[str], optional): SQL query. Defaults to None. - content_columns (Optional[List[str]], optional): Column that represent a Document's page_content. Defaults to the first column. - metadata_columns (Optional[List[str]], optional): Column(s) that represent a Document's metadata. Defaults to None. 
+ content_columns (Optional[list[str]], optional): Column that represent a Document's page_content. Defaults to the first column. + metadata_columns (Optional[list[str]], optional): Column(s) that represent a Document's metadata. Defaults to None. formatter (Optional[Callable], optional): A function to format page content (OneOf: format, formatter). Defaults to None. metadata_json_column (Optional[str], optional): Column to store metadata as JSON. Defaults to "langchain_metadata". @@ -70,8 +70,8 @@ async def create( query: Optional[str] = None, table_name: Optional[str] = None, schema_name: str = "public", - content_columns: Optional[List[str]] = None, - metadata_columns: Optional[List[str]] = None, + content_columns: Optional[list[str]] = None, + metadata_columns: Optional[list[str]] = None, metadata_json_column: Optional[str] = None, format: Optional[str] = None, formatter: Optional[Callable] = None, @@ -83,8 +83,8 @@ async def create( query (Optional[str], optional): SQL query. Defaults to None. table_name (Optional[str], optional): Name of table to query. Defaults to None. schema_name (str, optional): Database schema name of the table. Defaults to "public". - content_columns (Optional[List[str]], optional): Column that represent a Document's page_content. Defaults to the first column. - metadata_columns (Optional[List[str]], optional): Column(s) that represent a Document's metadata. Defaults to None. + content_columns (Optional[list[str]], optional): Column that represent a Document's page_content. Defaults to the first column. + metadata_columns (Optional[list[str]], optional): Column(s) that represent a Document's metadata. Defaults to None. metadata_json_column (Optional[str], optional): Column to store metadata as JSON. Defaults to "langchain_metadata". format (Optional[str], optional): Format of page content (OneOf: text, csv, YAML, JSON). Defaults to 'text'. formatter (Optional[Callable], optional): A function to format page content (OneOf: format, formatter). Defaults to None. @@ -113,8 +113,8 @@ def create_sync( query: Optional[str] = None, table_name: Optional[str] = None, schema_name: str = "public", - content_columns: Optional[List[str]] = None, - metadata_columns: Optional[List[str]] = None, + content_columns: Optional[list[str]] = None, + metadata_columns: Optional[list[str]] = None, metadata_json_column: Optional[str] = None, format: Optional[str] = None, formatter: Optional[Callable] = None, @@ -126,8 +126,8 @@ def create_sync( query (Optional[str], optional): SQL query. Defaults to None. table_name (Optional[str], optional): Name of table to query. Defaults to None. schema_name (str, optional): Database schema name of the table. Defaults to "public". - content_columns (Optional[List[str]], optional): Column that represent a Document's page_content. Defaults to the first column. - metadata_columns (Optional[List[str]], optional): Column(s) that represent a Document's metadata. Defaults to None. + content_columns (Optional[list[str]], optional): Column that represent a Document's page_content. Defaults to the first column. + metadata_columns (Optional[list[str]], optional): Column(s) that represent a Document's metadata. Defaults to None. metadata_json_column (Optional[str], optional): Column to store metadata as JSON. Defaults to "langchain_metadata". format (Optional[str], optional): Format of page content (OneOf: text, csv, YAML, JSON). Defaults to 'text'. formatter (Optional[Callable], optional): A function to format page content (OneOf: format, formatter). 
Defaults to None. @@ -149,11 +149,11 @@ def create_sync( loader = engine._run_as_sync(coro) return cls(cls.__create_key, engine, loader) - def load(self) -> List[Document]: + def load(self) -> list[Document]: """Load PostgreSQL data into Document objects.""" return self._engine._run_as_sync(self._loader.aload()) - async def aload(self) -> List[Document]: + async def aload(self) -> list[Document]: """Load PostgreSQL data into Document objects.""" return await self._engine._run_as_async(self._loader.aload()) @@ -195,9 +195,9 @@ def __init__( key (object): Prevent direct constructor usage. engine (PostgresEngine): AsyncEngine with pool connection to the postgres database table_name (Optional[str], optional): Name of table to query. Defaults to None. - content_columns (Optional[List[str]], optional): Column that represent a Document's page_content. Defaults to the first column. + content_columns (Optional[list[str]], optional): Column that represent a Document's page_content. Defaults to the first column. schema_name (str, optional): Database schema name of the table. Defaults to "public". - metadata_columns (Optional[List[str]], optional): Column(s) that represent a Document's metadata. Defaults to None. + metadata_columns (Optional[list[str]], optional): Column(s) that represent a Document's metadata. Defaults to None. metadata_json_column (Optional[str], optional): Column to store metadata as JSON. Defaults to "langchain_metadata". Raises: @@ -217,7 +217,7 @@ async def create( table_name: str, schema_name: str = "public", content_column: str = DEFAULT_CONTENT_COL, - metadata_columns: List[str] = [], + metadata_columns: list[str] = [], metadata_json_column: Optional[str] = DEFAULT_METADATA_COL, ) -> PostgresDocumentSaver: """Create an PostgresDocumentSaver instance. @@ -226,8 +226,8 @@ async def create( engine (PostgresEngine):AsyncEngine with pool connection to the postgres database table_name (Optional[str], optional): Name of table to query. Defaults to None. schema_name (str, optional): Database schema name of the table. Defaults to "public". - content_columns (Optional[List[str]], optional): Column that represent a Document's page_content. Defaults to the first column. - metadata_columns (Optional[List[str]], optional): Column(s) that represent a Document's metadata. Defaults to None. + content_columns (Optional[list[str]], optional): Column that represent a Document's page_content. Defaults to the first column. + metadata_columns (Optional[list[str]], optional): Column(s) that represent a Document's metadata. Defaults to None. metadata_json_column (Optional[str], optional): Column to store metadata as JSON. Defaults to "langchain_metadata". Returns: @@ -251,7 +251,7 @@ def create_sync( table_name: str, schema_name: str = "public", content_column: str = DEFAULT_CONTENT_COL, - metadata_columns: List[str] = [], + metadata_columns: list[str] = [], metadata_json_column: str = DEFAULT_METADATA_COL, ) -> PostgresDocumentSaver: """Create an PostgresDocumentSaver instance. @@ -260,8 +260,8 @@ def create_sync( engine (PostgresEngine):AsyncEngine with pool connection to the postgres database table_name (Optional[str], optional): Name of table to query. Defaults to None. schema_name (str, optional): Database schema name of the table. Defaults to "public". - content_columns (Optional[List[str]], optional): Column that represent a Document's page_content. Defaults to the first column. - metadata_columns (Optional[List[str]], optional): Column(s) that represent a Document's metadata. 
Defaults to None. + content_columns (Optional[list[str]], optional): Column that represent a Document's page_content. Defaults to the first column. + metadata_columns (Optional[list[str]], optional): Column(s) that represent a Document's metadata. Defaults to None. metadata_json_column (Optional[str], optional): Column to store metadata as JSON. Defaults to "langchain_metadata". Returns: @@ -278,42 +278,42 @@ def create_sync( saver = engine._run_as_sync(coro) return cls(cls.__create_key, engine, saver) - async def aadd_documents(self, docs: List[Document]) -> None: + async def aadd_documents(self, docs: list[Document]) -> None: """ Save documents in the DocumentSaver table. Document’s metadata is added to columns if found or stored in langchain_metadata JSON column. Args: - docs (List[langchain_core.documents.Document]): a list of documents to be saved. + docs (list[langchain_core.documents.Document]): a list of documents to be saved. """ await self._engine._run_as_async(self._saver.aadd_documents(docs)) - def add_documents(self, docs: List[Document]) -> None: + def add_documents(self, docs: list[Document]) -> None: """ Save documents in the DocumentSaver table. Document’s metadata is added to columns if found or stored in langchain_metadata JSON column. Args: - docs (List[langchain_core.documents.Document]): a list of documents to be saved. + docs (list[langchain_core.documents.Document]): a list of documents to be saved. """ self._engine._run_as_sync(self._saver.aadd_documents(docs)) - async def adelete(self, docs: List[Document]) -> None: + async def adelete(self, docs: list[Document]) -> None: """ Delete all instances of a document from the DocumentSaver table by matching the entire Document object. Args: - docs (List[langchain_core.documents.Document]): a list of documents to be deleted. + docs (list[langchain_core.documents.Document]): a list of documents to be deleted. """ await self._engine._run_as_async(self._saver.adelete(docs)) - def delete(self, docs: List[Document]) -> None: + def delete(self, docs: list[Document]) -> None: """ Delete all instances of a document from the DocumentSaver table by matching the entire Document object. Args: - docs (List[langchain_core.documents.Document]): a list of documents to be deleted. + docs (list[langchain_core.documents.Document]): a list of documents to be deleted. 
""" self._engine._run_as_sync(self._saver.adelete(docs)) diff --git a/src/langchain_google_cloud_sql_pg/vectorstore.py b/src/langchain_google_cloud_sql_pg/vectorstore.py index 109e5d9a..de7275de 100644 --- a/src/langchain_google_cloud_sql_pg/vectorstore.py +++ b/src/langchain_google_cloud_sql_pg/vectorstore.py @@ -15,7 +15,7 @@ # TODO: Remove below import when minimum supported Python version is 3.10 from __future__ import annotations -from typing import Any, Callable, Iterable, List, Optional, Tuple, Type +from typing import Any, Callable, Iterable, Optional import numpy as np from langchain_core.documents import Document @@ -66,8 +66,8 @@ async def create( schema_name: str = "public", content_column: str = "content", embedding_column: str = "embedding", - metadata_columns: List[str] = [], - ignore_metadata_columns: Optional[List[str]] = None, + metadata_columns: list[str] = [], + ignore_metadata_columns: Optional[list[str]] = None, id_column: str = "langchain_id", metadata_json_column: Optional[str] = "langchain_metadata", distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, @@ -85,8 +85,8 @@ async def create( schema_name (str, optional): Database schema name of the table. Defaults to "public". content_column (str): Column that represent a Document's page_content. Defaults to "content". embedding_column (str): Column for embedding vectors. The embedding is generated from the document value. Defaults to "embedding". - metadata_columns (List[str]): Column(s) that represent a document's metadata. - ignore_metadata_columns (List[str]): Column(s) to ignore in pre-existing tables for a document's metadata. Can not be used with metadata_columns. Defaults to None. + metadata_columns (list[str]): Column(s) that represent a document's metadata. + ignore_metadata_columns (list[str]): Column(s) to ignore in pre-existing tables for a document's metadata. Can not be used with metadata_columns. Defaults to None. id_column (str): Column that represents the Document's id. Defaults to "langchain_id". metadata_json_column (str): Column to store metadata as JSON. Defaults to "langchain_metadata". distance_strategy (DistanceStrategy): Distance strategy to use for vector similarity search. Defaults to COSINE_DISTANCE. @@ -127,8 +127,8 @@ def create_sync( schema_name: str = "public", content_column: str = "content", embedding_column: str = "embedding", - metadata_columns: List[str] = [], - ignore_metadata_columns: Optional[List[str]] = None, + metadata_columns: list[str] = [], + ignore_metadata_columns: Optional[list[str]] = None, id_column: str = "langchain_id", metadata_json_column: str = "langchain_metadata", distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, @@ -146,8 +146,8 @@ def create_sync( schema_name (str, optional): Database schema name of the table. Defaults to "public". content_column (str): Column that represent a Document's page_content. Defaults to "content". embedding_column (str): Column for embedding vectors. The embedding is generated from the document value. Defaults to "embedding". - metadata_columns (List[str]): Column(s) that represent a document's metadata. - ignore_metadata_columns (List[str]): Column(s) to ignore in pre-existing tables for a document's metadata. Can not be used with metadata_columns. Defaults to None. + metadata_columns (list[str]): Column(s) that represent a document's metadata. + ignore_metadata_columns (list[str]): Column(s) to ignore in pre-existing tables for a document's metadata. Can not be used with metadata_columns. 
Defaults to None. id_column (str): Column that represents the Document's id. Defaults to "langchain_id". metadata_json_column (str): Column to store metadata as JSON. Defaults to "langchain_metadata". distance_strategy (DistanceStrategy): Distance strategy to use for vector similarity search. Defaults to COSINE_DISTANCE. @@ -186,10 +186,10 @@ def embeddings(self) -> Embeddings: async def aadd_texts( self, texts: Iterable[str], - metadatas: Optional[List[dict]] = None, - ids: Optional[List] = None, + metadatas: Optional[list[dict]] = None, + ids: Optional[list] = None, **kwargs: Any, - ) -> List[str]: + ) -> list[str]: """Embed texts and add to the table. Raises: @@ -202,10 +202,10 @@ async def aadd_texts( def add_texts( self, texts: Iterable[str], - metadatas: Optional[List[dict]] = None, - ids: Optional[List] = None, + metadatas: Optional[list[dict]] = None, + ids: Optional[list] = None, **kwargs: Any, - ) -> List[str]: + ) -> list[str]: """Embed texts and add to the table. Raises: @@ -217,10 +217,10 @@ def add_texts( async def aadd_documents( self, - documents: List[Document], - ids: Optional[List] = None, + documents: list[Document], + ids: Optional[list] = None, **kwargs: Any, - ) -> List[str]: + ) -> list[str]: """Embed documents and add to the table. Raises: @@ -232,10 +232,10 @@ async def aadd_documents( def add_documents( self, - documents: List[Document], - ids: Optional[List] = None, + documents: list[Document], + ids: Optional[list] = None, **kwargs: Any, - ) -> List[str]: + ) -> list[str]: """Embed documents and add to the table. Raises: @@ -247,7 +247,7 @@ def add_documents( async def adelete( self, - ids: Optional[List] = None, + ids: Optional[list] = None, **kwargs: Any, ) -> Optional[bool]: """Delete records from the table. @@ -259,7 +259,7 @@ async def adelete( def delete( self, - ids: Optional[List] = None, + ids: Optional[list] = None, **kwargs: Any, ) -> Optional[bool]: """Delete records from the table. @@ -271,18 +271,18 @@ def delete( @classmethod async def afrom_texts( # type: ignore[override] - cls: Type[PostgresVectorStore], - texts: List[str], + cls: type[PostgresVectorStore], + texts: list[str], embedding: Embeddings, engine: PostgresEngine, table_name: str, schema_name: str = "public", - metadatas: Optional[List[dict]] = None, - ids: Optional[List] = None, + metadatas: Optional[list[dict]] = None, + ids: Optional[list] = None, content_column: str = "content", embedding_column: str = "embedding", - metadata_columns: List[str] = [], - ignore_metadata_columns: Optional[List[str]] = None, + metadata_columns: list[str] = [], + ignore_metadata_columns: Optional[list[str]] = None, id_column: str = "langchain_id", metadata_json_column: str = "langchain_metadata", distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, @@ -294,17 +294,17 @@ async def afrom_texts( # type: ignore[override] """Create an PostgresVectorStore instance from texts. Args: - texts (List[str]): Texts to add to the vector store. + texts (list[str]): Texts to add to the vector store. embedding (Embeddings): Text embedding model to use. engine (PostgresEngine): Connection pool engine for managing connections to Postgres database. table_name (str): Name of the existing table or the table to be created. schema_name (str, optional): Database schema name of the table. Defaults to "public". - metadatas (Optional[List[dict]]): List of metadatas to add to table records. - ids: (Optional[List]): List of IDs to add to table records. 
+ metadatas (Optional[list[dict]]): List of metadatas to add to table records. + ids: (Optional[list]): List of IDs to add to table records. content_column (str): Column that represent a Document’s page_content. Defaults to "content". embedding_column (str): Column for embedding vectors. The embedding is generated from the document value. Defaults to "embedding". - metadata_columns (List[str]): Column(s) that represent a document's metadata. - ignore_metadata_columns (List[str]): Column(s) to ignore in pre-existing tables for a document's metadata. Can not be used with metadata_columns. Defaults to None. + metadata_columns (list[str]): Column(s) that represent a document's metadata. + ignore_metadata_columns (list[str]): Column(s) to ignore in pre-existing tables for a document's metadata. Can not be used with metadata_columns. Defaults to None. id_column (str): Column that represents the Document's id. Defaults to "langchain_id". metadata_json_column (str): Column to store metadata as JSON. Defaults to "langchain_metadata". distance_strategy (DistanceStrategy): Distance strategy to use for vector similarity search. Defaults to COSINE_DISTANCE. @@ -341,17 +341,17 @@ async def afrom_texts( # type: ignore[override] @classmethod async def afrom_documents( # type: ignore[override] - cls: Type[PostgresVectorStore], - documents: List[Document], + cls: type[PostgresVectorStore], + documents: list[Document], embedding: Embeddings, engine: PostgresEngine, table_name: str, schema_name: str = "public", - ids: Optional[List] = None, + ids: Optional[list] = None, content_column: str = "content", embedding_column: str = "embedding", - metadata_columns: List[str] = [], - ignore_metadata_columns: Optional[List[str]] = None, + metadata_columns: list[str] = [], + ignore_metadata_columns: Optional[list[str]] = None, id_column: str = "langchain_id", metadata_json_column: str = "langchain_metadata", distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, @@ -363,17 +363,17 @@ async def afrom_documents( # type: ignore[override] """Create an PostgresVectorStore instance from documents. Args: - documents (List[Document]): Documents to add to the vector store. + documents (list[Document]): Documents to add to the vector store. embedding (Embeddings): Text embedding model to use. engine (PostgresEngine): Connection pool engine for managing connections to Postgres database. table_name (str): Name of the existing table or the table to be created. schema_name (str, optional): Database schema name of the table. Defaults to "public". - metadatas (Optional[List[dict]]): List of metadatas to add to table records. - ids: (Optional[List]): List of IDs to add to table records. + metadatas (Optional[list[dict]]): List of metadatas to add to table records. + ids: (Optional[list]): List of IDs to add to table records. content_column (str): Column that represent a Document’s page_content. Defaults to "content". embedding_column (str): Column for embedding vectors. The embedding is generated from the document value. Defaults to "embedding". - metadata_columns (List[str]): Column(s) that represent a document's metadata. - ignore_metadata_columns (List[str]): Column(s) to ignore in pre-existing tables for a document's metadata. Can not be used with metadata_columns. Defaults to None. + metadata_columns (list[str]): Column(s) that represent a document's metadata. + ignore_metadata_columns (list[str]): Column(s) to ignore in pre-existing tables for a document's metadata. Can not be used with metadata_columns. 
Defaults to None. id_column (str): Column that represents the Document's id. Defaults to "langchain_id". metadata_json_column (str): Column to store metadata as JSON. Defaults to "langchain_metadata". distance_strategy (DistanceStrategy): Distance strategy to use for vector similarity search. Defaults to COSINE_DISTANCE. @@ -410,18 +410,18 @@ async def afrom_documents( # type: ignore[override] @classmethod def from_texts( # type: ignore[override] - cls: Type[PostgresVectorStore], - texts: List[str], + cls: type[PostgresVectorStore], + texts: list[str], embedding: Embeddings, engine: PostgresEngine, table_name: str, schema_name: str = "public", - metadatas: Optional[List[dict]] = None, - ids: Optional[List] = None, + metadatas: Optional[list[dict]] = None, + ids: Optional[list] = None, content_column: str = "content", embedding_column: str = "embedding", - metadata_columns: List[str] = [], - ignore_metadata_columns: Optional[List[str]] = None, + metadata_columns: list[str] = [], + ignore_metadata_columns: Optional[list[str]] = None, id_column: str = "langchain_id", metadata_json_column: str = "langchain_metadata", distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, @@ -433,17 +433,17 @@ def from_texts( # type: ignore[override] """Create an PostgresVectorStore instance from texts. Args: - texts (List[str]): Texts to add to the vector store. + texts (list[str]): Texts to add to the vector store. embedding (Embeddings): Text embedding model to use. engine (PostgresEngine): Connection pool engine for managing connections to Postgres database. table_name (str): Name of the existing table or the table to be created. schema_name (str, optional): Database schema name of the table. Defaults to "public". - metadatas (Optional[List[dict]]): List of metadatas to add to table records. - ids: (Optional[List]): List of IDs to add to table records. + metadatas (Optional[list[dict]]): List of metadatas to add to table records. + ids: (Optional[list]): List of IDs to add to table records. content_column (str): Column that represent a Document’s page_content. Defaults to "content". embedding_column (str): Column for embedding vectors. The embedding is generated from the document value. Defaults to "embedding". - metadata_columns (List[str]): Column(s) that represent a document's metadata. - ignore_metadata_columns (List[str]): Column(s) to ignore in pre-existing tables for a document's metadata. Can not be used with metadata_columns. Defaults to None. + metadata_columns (list[str]): Column(s) that represent a document's metadata. + ignore_metadata_columns (list[str]): Column(s) to ignore in pre-existing tables for a document's metadata. Can not be used with metadata_columns. Defaults to None. id_column (str): Column that represents the Document's id. Defaults to "langchain_id". metadata_json_column (str): Column to store metadata as JSON. Defaults to "langchain_metadata". distance_strategy (DistanceStrategy): Distance strategy to use for vector similarity search. Defaults to COSINE_DISTANCE. 
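The hunks above modernize the annotations of the sync `from_texts` factory without changing its behavior. For orientation, here is a minimal usage sketch of the parameters documented in that docstring. It is an illustration, not part of this diff: the embedding model, connection values, table name, and the `PostgresEngine.from_instance` / `init_vectorstore_table` setup calls are assumptions about the package's existing API rather than anything introduced here.

```python
# Hypothetical usage sketch for PostgresVectorStore.from_texts (not part of this diff).
from langchain_google_cloud_sql_pg import PostgresEngine, PostgresVectorStore
from langchain_google_vertexai import VertexAIEmbeddings  # any Embeddings implementation works

# Assumed setup: a reachable Cloud SQL instance and a pre-created vector table.
engine = PostgresEngine.from_instance(
    project_id="my-project",
    region="us-central1",
    instance="my-instance",
    database="my-db",
)
engine.init_vectorstore_table("my_vectors", vector_size=768)

# Only parameters documented in the docstring above are used.
store = PostgresVectorStore.from_texts(
    texts=["first document", "second document"],
    embedding=VertexAIEmbeddings(model_name="text-embedding-004"),
    engine=engine,
    table_name="my_vectors",
    metadatas=[{"source": "a"}, {"source": "b"}],  # optional per-record metadata
)
```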
@@ -480,17 +480,17 @@ def from_texts( # type: ignore[override] @classmethod def from_documents( # type: ignore[override] - cls: Type[PostgresVectorStore], - documents: List[Document], + cls: type[PostgresVectorStore], + documents: list[Document], embedding: Embeddings, engine: PostgresEngine, table_name: str, schema_name: str = "public", - ids: Optional[List] = None, + ids: Optional[list] = None, content_column: str = "content", embedding_column: str = "embedding", - metadata_columns: List[str] = [], - ignore_metadata_columns: Optional[List[str]] = None, + metadata_columns: list[str] = [], + ignore_metadata_columns: Optional[list[str]] = None, id_column: str = "langchain_id", metadata_json_column: str = "langchain_metadata", distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, @@ -502,17 +502,17 @@ def from_documents( # type: ignore[override] """Create an PostgresVectorStore instance from documents. Args: - documents (List[Document]): Documents to add to the vector store. + documents (list[Document]): Documents to add to the vector store. embedding (Embeddings): Text embedding model to use. engine (PostgresEngine): Connection pool engine for managing connections to Postgres database. table_name (str): Name of the existing table or the table to be created. schema_name (str, optional): Database schema name of the table. Defaults to "public". - metadatas (Optional[List[dict]]): List of metadatas to add to table records. - ids: (Optional[List]): List of IDs to add to table records. + metadatas (Optional[list[dict]]): List of metadatas to add to table records. + ids: (Optional[list]): List of IDs to add to table records. content_column (str): Column that represent a Document’s page_content. Defaults to "content". embedding_column (str): Column for embedding vectors. The embedding is generated from the document value. Defaults to "embedding". - metadata_columns (List[str]): Column(s) that represent a document's metadata. - ignore_metadata_columns (List[str]): Column(s) to ignore in pre-existing tables for a document's metadata. Can not be used with metadata_columns. Defaults to None. + metadata_columns (list[str]): Column(s) that represent a document's metadata. + ignore_metadata_columns (list[str]): Column(s) to ignore in pre-existing tables for a document's metadata. Can not be used with metadata_columns. Defaults to None. id_column (str): Column that represents the Document's id. Defaults to "langchain_id". metadata_json_column (str): Column to store metadata as JSON. Defaults to "langchain_metadata". distance_strategy (DistanceStrategy): Distance strategy to use for vector similarity search. Defaults to COSINE_DISTANCE. 
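The `from_documents` hunk above follows the same pattern as `from_texts` but ingests `Document` objects, whose metadata is either promoted into dedicated columns (via `metadata_columns`) or stored in the JSON metadata column. The sketch below is illustrative only and reuses the assumed `engine` and embedding model from the previous sketch; the `source` column and the trailing search call are assumptions, the latter included just to show the `list[tuple[Document, float]]` return shape the later hunks annotate. Note also that the builtin-generic spellings used throughout these hunks (`list[str]`, `type[PostgresVectorStore]`, `tuple[Document, float]`) are only subscriptable at runtime on Python 3.9+; they stay valid here because vectorstore.py keeps `from __future__ import annotations`, per the TODO retained at the top of the file.

```python
# Hypothetical usage sketch for PostgresVectorStore.from_documents (not part of this diff).
from langchain_core.documents import Document

docs = [
    Document(page_content="alpha", metadata={"source": "a"}),
    Document(page_content="beta", metadata={"source": "b"}),
]

store = PostgresVectorStore.from_documents(
    documents=docs,
    embedding=VertexAIEmbeddings(model_name="text-embedding-004"),
    engine=engine,
    table_name="my_vectors",
    # Promote "source" out of the JSON blob; the column must exist in the table.
    metadata_columns=["source"],
)

# Return type is list[tuple[Document, float]]: each hit is paired with its distance score.
for doc, score in store.similarity_search_with_score("alpha", k=2):
    print(score, doc.page_content)
```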
@@ -553,7 +553,7 @@ async def asimilarity_search( k: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Document]: + ) -> list[Document]: """Return docs selected by similarity search on query.""" return await self._engine._run_as_async( self.__vs.asimilarity_search(query, k, filter, **kwargs) @@ -565,7 +565,7 @@ def similarity_search( k: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Document]: + ) -> list[Document]: """Return docs selected by similarity search on query.""" return self._engine._run_as_sync( self.__vs.asimilarity_search(query, k, filter, **kwargs) @@ -588,7 +588,7 @@ async def asimilarity_search_with_score( k: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Tuple[Document, float]]: + ) -> list[tuple[Document, float]]: """Return docs and distance scores selected by similarity search on query.""" return await self._engine._run_as_async( self.__vs.asimilarity_search_with_score(query, k, filter, **kwargs) @@ -600,7 +600,7 @@ def similarity_search_with_score( k: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Tuple[Document, float]]: + ) -> list[tuple[Document, float]]: """Return docs and distance scores selected by similarity search on query.""" return self._engine._run_as_sync( self.__vs.asimilarity_search_with_score(query, k, filter, **kwargs) @@ -608,11 +608,11 @@ def similarity_search_with_score( async def asimilarity_search_by_vector( self, - embedding: List[float], + embedding: list[float], k: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Document]: + ) -> list[Document]: """Return docs selected by vector similarity search.""" return await self._engine._run_as_async( self.__vs.asimilarity_search_by_vector(embedding, k, filter, **kwargs) @@ -620,11 +620,11 @@ async def asimilarity_search_by_vector( def similarity_search_by_vector( self, - embedding: List[float], + embedding: list[float], k: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Document]: + ) -> list[Document]: """Return docs selected by vector similarity search.""" return self._engine._run_as_sync( self.__vs.asimilarity_search_by_vector(embedding, k, filter, **kwargs) @@ -632,11 +632,11 @@ def similarity_search_by_vector( async def asimilarity_search_with_score_by_vector( self, - embedding: List[float], + embedding: list[float], k: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Tuple[Document, float]]: + ) -> list[tuple[Document, float]]: """Return docs and distance scores selected by vector similarity search.""" return await self._engine._run_as_async( self.__vs.asimilarity_search_with_score_by_vector( @@ -646,11 +646,11 @@ async def asimilarity_search_with_score_by_vector( def similarity_search_with_score_by_vector( self, - embedding: List[float], + embedding: list[float], k: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Tuple[Document, float]]: + ) -> list[tuple[Document, float]]: """Return docs and distance scores selected by similarity search on vector.""" return self._engine._run_as_sync( self.__vs.asimilarity_search_with_score_by_vector( @@ -666,7 +666,7 @@ async def amax_marginal_relevance_search( lambda_mult: Optional[float] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Document]: + ) -> list[Document]: """Return docs selected using the maximal marginal relevance.""" return await self._engine._run_as_async( 
self.__vs.amax_marginal_relevance_search( @@ -682,7 +682,7 @@ def max_marginal_relevance_search( lambda_mult: Optional[float] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Document]: + ) -> list[Document]: """Return docs selected using the maximal marginal relevance.""" return self._engine._run_as_sync( self.__vs.amax_marginal_relevance_search( @@ -692,13 +692,13 @@ def max_marginal_relevance_search( async def amax_marginal_relevance_search_by_vector( self, - embedding: List[float], + embedding: list[float], k: Optional[int] = None, fetch_k: Optional[int] = None, lambda_mult: Optional[float] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Document]: + ) -> list[Document]: """Return docs selected using the maximal marginal relevance.""" return await self._engine._run_as_async( self.__vs.amax_marginal_relevance_search_by_vector( @@ -708,13 +708,13 @@ async def amax_marginal_relevance_search_by_vector( def max_marginal_relevance_search_by_vector( self, - embedding: List[float], + embedding: list[float], k: Optional[int] = None, fetch_k: Optional[int] = None, lambda_mult: Optional[float] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Document]: + ) -> list[Document]: """Return docs selected using the maximal marginal relevance.""" return self._engine._run_as_sync( self.__vs.amax_marginal_relevance_search_by_vector( @@ -724,13 +724,13 @@ def max_marginal_relevance_search_by_vector( async def amax_marginal_relevance_search_with_score_by_vector( self, - embedding: List[float], + embedding: list[float], k: Optional[int] = None, fetch_k: Optional[int] = None, lambda_mult: Optional[float] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Tuple[Document, float]]: + ) -> list[tuple[Document, float]]: """Return docs and distance scores selected using the maximal marginal relevance.""" return await self._engine._run_as_async( self.__vs.amax_marginal_relevance_search_with_score_by_vector( @@ -740,13 +740,13 @@ async def amax_marginal_relevance_search_with_score_by_vector( def max_marginal_relevance_search_with_score_by_vector( self, - embedding: List[float], + embedding: list[float], k: Optional[int] = None, fetch_k: Optional[int] = None, lambda_mult: Optional[float] = None, filter: Optional[str] = None, **kwargs: Any, - ) -> List[Tuple[Document, float]]: + ) -> list[tuple[Document, float]]: """Return docs and distance scores selected using the maximal marginal relevance.""" return self._engine._run_as_sync( self.__vs.amax_marginal_relevance_search_with_score_by_vector( diff --git a/src/langchain_google_cloud_sql_pg/version.py b/src/langchain_google_cloud_sql_pg/version.py index d946cfe3..bc0bc4ca 100644 --- a/src/langchain_google_cloud_sql_pg/version.py +++ b/src/langchain_google_cloud_sql_pg/version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "0.11.1" +__version__ = "0.12.0" diff --git a/tests/test_async_chatmessagehistory.py b/tests/test_async_chatmessagehistory.py index b626674b..e5443b11 100644 --- a/tests/test_async_chatmessagehistory.py +++ b/tests/test_async_chatmessagehistory.py @@ -13,7 +13,6 @@ # limitations under the License. 
import os import uuid -from typing import Any, Generator import pytest import pytest_asyncio diff --git a/tests/test_chatmessagehistory.py b/tests/test_chatmessagehistory.py index b0a9420a..5ccedd39 100644 --- a/tests/test_chatmessagehistory.py +++ b/tests/test_chatmessagehistory.py @@ -13,7 +13,7 @@ # limitations under the License. import os import uuid -from typing import Any, Generator +from typing import Any import pytest import pytest_asyncio diff --git a/tests/test_engine.py b/tests/test_engine.py index 5e117b0e..1c2653bf 100644 --- a/tests/test_engine.py +++ b/tests/test_engine.py @@ -110,6 +110,11 @@ async def engine(self, db_project, db_region, db_instance, db_name): instance=db_instance, region=db_region, database=db_name, + engine_args={ + # add some connection args to validate engine_args works correctly + "pool_size": 3, + "max_overflow": 2, + }, ) yield engine await aexecute(engine, f'DROP TABLE "{CUSTOM_TABLE}"') @@ -117,6 +122,9 @@ async def engine(self, db_project, db_region, db_instance, db_name): await aexecute(engine, f'DROP TABLE "{INT_ID_CUSTOM_TABLE}"') await engine.close() + async def test_engine_args(self, engine): + assert "Pool size: 3" in engine._pool.pool.status() + async def test_init_table(self, engine): await engine.ainit_vectorstore_table(DEFAULT_TABLE, VECTOR_SIZE) id = str(uuid.uuid4()) diff --git a/tests/test_vectorstore_index.py b/tests/test_vectorstore_index.py index 7c240061..a6b5af92 100644 --- a/tests/test_vectorstore_index.py +++ b/tests/test_vectorstore_index.py @@ -110,7 +110,7 @@ async def vs(self, engine): vs.drop_vector_index() yield vs - async def test_aapply_vector_index(self, vs): + async def test_apply_vector_index(self, vs): index = HNSWIndex() vs.apply_vector_index(index) assert vs.is_valid_index(DEFAULT_INDEX_NAME) @@ -128,7 +128,7 @@ async def test_dropindex(self, vs): result = vs.is_valid_index(DEFAULT_INDEX_NAME) assert not result - async def test_aapply_vector_index_ivfflat(self, vs): + async def test_apply_vector_index_ivfflat(self, vs): index = IVFFlatIndex(distance_strategy=DistanceStrategy.EUCLIDEAN) vs.apply_vector_index(index, concurrently=True) assert vs.is_valid_index(DEFAULT_INDEX_NAME)
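The new `test_engine_args` case above exercises an `engine_args` pass-through on engine creation: the fixture pins `pool_size` and `max_overflow`, and the test checks that the pool size is reflected in the pool's status string. Below is a minimal sketch of the same idea outside the test fixture; the connection values are placeholders, `from_instance` and its keyword names are assumptions about the existing factory rather than something shown in this hunk, and `_pool` is a private attribute poked only because the test does the same.

```python
# Hypothetical sketch of the engine_args pass-through that test_engine_args validates.
from langchain_google_cloud_sql_pg import PostgresEngine

engine = PostgresEngine.from_instance(
    project_id="my-project",
    region="us-central1",
    instance="my-instance",
    database="my-db",
    # Extra keyword arguments handed through to the underlying SQLAlchemy engine.
    engine_args={"pool_size": 3, "max_overflow": 2},
)

# Mirrors the test's assertion: the configured pool size shows up in the pool status.
assert "Pool size: 3" in engine._pool.pool.status()
```

Separately, the renamed `test_apply_vector_index` and `test_apply_vector_index_ivfflat` cases only drop the stray `a` prefix; both exercise the synchronous `apply_vector_index` path, so the old `aapply` spelling was misleading.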