Source code for langchain.utilities.searx_search
.. code-block:: python
from langchain.utilities import SearxSearchWrapper
# note the unsecure parameter is not needed if you pass the url scheme as
# http
searx = SearxSearchWrapper(searx_host="http://localhost:8888",
unsecure=True)
"""
_result: SearxResults = PrivateAttr()
searx_host: str = ""
unsecure: bool = False
params: dict = Field(default_factory=_get_default_params)
headers: Optional[dict] = None
engines: Optional[List[str]] = []
categories: Optional[List[str]] = []
query_suffix: Optional[str] = ""
k: int = 10
aiosession: Optional[Any] = None
@validator("unsecure")
def disable_ssl_warnings(cls, v: bool) -> bool:
"""Disable SSL warnings."""
if v:
# requests.urllib3.disable_warnings()
try:
import urllib3
urllib3.disable_warnings()
except ImportError as e:
print(e)
return v
@root_validator()
def validate_params(cls, values: Dict) -> Dict:
"""Validate that custom searx params are merged with default ones."""
user_params = values["params"]
default = _get_default_params()
values["params"] = {**default, **user_params}
engines = values.get("engines")
if engines:
values["params"]["engines"] = ",".join(engines)
categories = values.get("categories")
if categories:
values["params"]["categories"] = ",".join(categories) | https://python.langchain.com/en/latest/_modules/langchain/utilities/searx_search.html |
e5b2d309cf90-5 | if categories:
values["params"]["categories"] = ",".join(categories)
searx_host = get_from_dict_or_env(values, "searx_host", "SEARX_HOST")
if not searx_host.startswith("http"):
print(
f"Warning: missing the url scheme on host \
! assuming secure https://{searx_host} "
)
searx_host = "https://" + searx_host
elif searx_host.startswith("http://"):
values["unsecure"] = True
cls.disable_ssl_warnings(True)
values["searx_host"] = searx_host
return values
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _searx_api_query(self, params: dict) -> SearxResults:
"""Actual request to searx API."""
raw_result = requests.get(
self.searx_host,
headers=self.headers,
params=params,
verify=not self.unsecure,
)
# test if http result is ok
if not raw_result.ok:
raise ValueError("Searx API returned an error: ", raw_result.text)
res = SearxResults(raw_result.text)
self._result = res
return res
async def _asearx_api_query(self, params: dict) -> SearxResults:
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.get(
self.searx_host,
headers=self.headers,
params=params,
ssl=(lambda: False if self.unsecure else None)(),
) as response:
if not response.ok:
raise ValueError("Searx API returned an error: ", response.text)
result = SearxResults(await response.text())
self._result = result
else:
async with self.aiosession.get(
self.searx_host,
headers=self.headers,
params=params,
verify=not self.unsecure,
) as response:
if not response.ok:
raise ValueError("Searx API returned an error: ", response.text)
result = SearxResults(await response.text())
self._result = result
return result
[docs] def run(
self,
query: str,
engines: Optional[List[str]] = None,
categories: Optional[List[str]] = None,
query_suffix: Optional[str] = "",
**kwargs: Any,
) -> str:
"""Run query through Searx API and parse results.
You can pass any other params to the searx query API.
Args:
query: The query to search for.
query_suffix: Extra suffix appended to the query.
engines: List of engines to use for the query.
categories: List of categories to use for the query.
**kwargs: extra parameters to pass to the searx API.
Returns:
str: The result of the query.
Raises:
ValueError: If an error occurred with the query.
Example:
This will make a query to the qwant engine:
.. code-block:: python
from langchain.utilities import SearxSearchWrapper
searx = SearxSearchWrapper(searx_host="http://my.searx.host") | https://python.langchain.com/en/latest/_modules/langchain/utilities/searx_search.html |
e5b2d309cf90-7 | searx.run("what is the weather in France ?", engine="qwant")
# the same result can be achieved using the `!` syntax of searx
# to select the engine using `query_suffix`
searx.run("what is the weather in France ?", query_suffix="!qwant")
"""
_params = {
"q": query,
}
params = {**self.params, **_params, **kwargs}
if self.query_suffix and len(self.query_suffix) > 0:
params["q"] += " " + self.query_suffix
if isinstance(query_suffix, str) and len(query_suffix) > 0:
params["q"] += " " + query_suffix
if isinstance(engines, list) and len(engines) > 0:
params["engines"] = ",".join(engines)
if isinstance(categories, list) and len(categories) > 0:
params["categories"] = ",".join(categories)
res = self._searx_api_query(params)
if len(res.answers) > 0:
toret = res.answers[0]
# only return the content of the results list
elif len(res.results) > 0:
toret = "\n\n".join([r.get("content", "") for r in res.results[: self.k]])
else:
toret = "No good search result found"
return toret
[docs] async def arun(
self,
query: str,
engines: Optional[List[str]] = None,
query_suffix: Optional[str] = "",
**kwargs: Any,
) -> str:
"""Asynchronously version of `run`.""" | https://python.langchain.com/en/latest/_modules/langchain/utilities/searx_search.html |
e5b2d309cf90-8 | ) -> str:
"""Asynchronously version of `run`."""
_params = {
"q": query,
}
params = {**self.params, **_params, **kwargs}
if self.query_suffix and len(self.query_suffix) > 0:
params["q"] += " " + self.query_suffix
if isinstance(query_suffix, str) and len(query_suffix) > 0:
params["q"] += " " + query_suffix
if isinstance(engines, list) and len(engines) > 0:
params["engines"] = ",".join(engines)
res = await self._asearx_api_query(params)
if len(res.answers) > 0:
toret = res.answers[0]
# only return the content of the results list
elif len(res.results) > 0:
toret = "\n\n".join([r.get("content", "") for r in res.results[: self.k]])
else:
toret = "No good search result found"
return toret
[docs] def results(
self,
query: str,
num_results: int,
engines: Optional[List[str]] = None,
categories: Optional[List[str]] = None,
query_suffix: Optional[str] = "",
**kwargs: Any,
) -> List[Dict]:
"""Run query through Searx API and returns the results with metadata.
Args:
query: The query to search for.
query_suffix: Extra suffix appended to the query.
num_results: Limit the number of results to return.
engines: List of engines to use for the query.
categories: List of categories to use for the query.
**kwargs: extra parameters to pass to the searx API.
Returns:
Dict with the following keys:
{
snippet: The description of the result.
title: The title of the result.
link: The link to the result.
engines: The engines used for the result.
category: Searx category of the result.
}
"""
_params = {
"q": query,
}
params = {**self.params, **_params, **kwargs}
if self.query_suffix and len(self.query_suffix) > 0:
params["q"] += " " + self.query_suffix
if isinstance(query_suffix, str) and len(query_suffix) > 0:
params["q"] += " " + query_suffix
if isinstance(engines, list) and len(engines) > 0:
params["engines"] = ",".join(engines)
if isinstance(categories, list) and len(categories) > 0:
params["categories"] = ",".join(categories)
results = self._searx_api_query(params).results[:num_results]
if len(results) == 0:
return [{"Result": "No good Search Result was found"}]
return [
{
"snippet": result.get("content", ""),
"title": result["title"],
"link": result["url"],
"engines": result["engines"],
"category": result["category"],
}
for result in results
]
[docs] async def aresults(
self,
query: str,
num_results: int,
engines: Optional[List[str]] = None,
query_suffix: Optional[str] = "",
**kwargs: Any,
) -> List[Dict]:
"""Asynchronously query with json results.
Uses aiohttp. See `results` for more info.
"""
_params = {
"q": query,
}
params = {**self.params, **_params, **kwargs}
if self.query_suffix and len(self.query_suffix) > 0:
params["q"] += " " + self.query_suffix
if isinstance(query_suffix, str) and len(query_suffix) > 0:
params["q"] += " " + query_suffix
if isinstance(engines, list) and len(engines) > 0:
params["engines"] = ",".join(engines)
results = (await self._asearx_api_query(params)).results[:num_results]
if len(results) == 0:
return [{"Result": "No good Search Result was found"}]
return [
{
"snippet": result.get("content", ""),
"title": result["title"],
"link": result["url"],
"engines": result["engines"],
"category": result["category"],
}
for result in results
]
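Putting the pieces together, here is a minimal usage sketch. The host URL is a placeholder for your own Searx/SearxNG instance, and network access is assumed:

.. code-block:: python

    from langchain.utilities import SearxSearchWrapper

    # hypothetical host; replace with your own instance
    searx = SearxSearchWrapper(searx_host="https://searx.example.org", k=5)

    # plain-text answer, restricted to specific categories
    print(searx.run("large language models", categories=["science"]))

    # structured results with metadata (snippet, title, link, engines, category)
    for hit in searx.results("large language models", num_results=3):
        print(hit["title"], "->", hit["link"])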
Source code for langchain.utilities.serpapi
"""Chain that calls SerpAPI.
Heavily borrowed from https://github.com/ofirpress/self-ask
"""
import os
import sys
from typing import Any, Dict, Optional, Tuple
import aiohttp
from pydantic import BaseModel, Extra, Field, root_validator
from langchain.utils import get_from_dict_or_env
class HiddenPrints:
"""Context manager to hide prints."""
def __enter__(self) -> None:
"""Open file to pipe stdout to."""
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, "w")
def __exit__(self, *_: Any) -> None:
"""Close file that stdout was piped to."""
sys.stdout.close()
sys.stdout = self._original_stdout
[docs]class SerpAPIWrapper(BaseModel):
"""Wrapper around SerpAPI.
To use, you should have the ``google-search-results`` python package installed,
and the environment variable ``SERPAPI_API_KEY`` set with your API key, or pass
`serpapi_api_key` as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain import SerpAPIWrapper
serpapi = SerpAPIWrapper()
"""
search_engine: Any #: :meta private:
params: dict = Field(
default={
"engine": "google",
"google_domain": "google.com",
"gl": "us",
"hl": "en",
}
)
serpapi_api_key: Optional[str] = None
aiosession: Optional[aiohttp.ClientSession] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
serpapi_api_key = get_from_dict_or_env(
values, "serpapi_api_key", "SERPAPI_API_KEY"
)
values["serpapi_api_key"] = serpapi_api_key
try:
from serpapi import GoogleSearch
values["search_engine"] = GoogleSearch
except ImportError:
raise ValueError(
"Could not import serpapi python package. "
"Please install it with `pip install google-search-results`."
)
return values
[docs] async def arun(self, query: str, **kwargs: Any) -> str:
"""Run query through SerpAPI and parse result async."""
return self._process_response(await self.aresults(query))
[docs] def run(self, query: str, **kwargs: Any) -> str:
"""Run query through SerpAPI and parse result."""
return self._process_response(self.results(query))
[docs] def results(self, query: str) -> dict:
"""Run query through SerpAPI and return the raw result."""
params = self.get_params(query)
with HiddenPrints():
search = self.search_engine(params)
res = search.get_dict()
return res
[docs] async def aresults(self, query: str) -> dict:
"""Use aiohttp to run query through SerpAPI and return the results async.""" | https://python.langchain.com/en/latest/_modules/langchain/utilities/serpapi.html |
099a86018d25-2 | """Use aiohttp to run query through SerpAPI and return the results async."""
def construct_url_and_params() -> Tuple[str, Dict[str, str]]:
params = self.get_params(query)
params["source"] = "python"
if self.serpapi_api_key:
params["serp_api_key"] = self.serpapi_api_key
params["output"] = "json"
url = "https://serpapi.com/search"
return url, params
url, params = construct_url_and_params()
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.get(url, params=params) as response:
res = await response.json()
else:
async with self.aiosession.get(url, params=params) as response:
res = await response.json()
return res
[docs] def get_params(self, query: str) -> Dict[str, str]:
"""Get parameters for SerpAPI."""
_params = {
"api_key": self.serpapi_api_key,
"q": query,
}
params = {**self.params, **_params}
return params
@staticmethod
def _process_response(res: dict) -> str:
"""Process response from SerpAPI."""
if "error" in res.keys():
raise ValueError(f"Got error from SerpAPI: {res['error']}")
if "answer_box" in res.keys() and "answer" in res["answer_box"].keys():
toret = res["answer_box"]["answer"]
elif "answer_box" in res.keys() and "snippet" in res["answer_box"].keys():
toret = res["answer_box"]["snippet"] | https://python.langchain.com/en/latest/_modules/langchain/utilities/serpapi.html |
099a86018d25-3 | toret = res["answer_box"]["snippet"]
elif (
"answer_box" in res.keys()
and "snippet_highlighted_words" in res["answer_box"].keys()
):
toret = res["answer_box"]["snippet_highlighted_words"][0]
elif (
"sports_results" in res.keys()
and "game_spotlight" in res["sports_results"].keys()
):
toret = res["sports_results"]["game_spotlight"]
elif (
"shopping_results" in res.keys()
and "title" in res["shopping_results"][0].keys()
):
toret = res["shopping_results"][:3]
elif (
"knowledge_graph" in res.keys()
and "description" in res["knowledge_graph"].keys()
):
toret = res["knowledge_graph"]["description"]
elif "snippet" in res["organic_results"][0].keys():
toret = res["organic_results"][0]["snippet"]
elif "link" in res["organic_results"][0].keys():
toret = res["organic_results"][0]["link"]
else:
toret = "No good search result found"
return toret
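A minimal usage sketch, assuming the ``google-search-results`` package is installed and ``SERPAPI_API_KEY`` is set in the environment; the response keys shown are illustrative:

.. code-block:: python

    from langchain import SerpAPIWrapper

    serpapi = SerpAPIWrapper()                 # default Google engine params
    print(serpapi.run("capital of France"))    # parsed answer, e.g. from the answer box

    raw = serpapi.results("capital of France")       # full raw response as a dict
    print(raw["organic_results"][0]["link"])         # illustrative key access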
Source code for langchain.utilities.powerbi
"""Wrapper around a Power BI endpoint."""
from __future__ import annotations
import asyncio
import logging
import os
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Union
import aiohttp
import requests
from aiohttp import ServerTimeoutError
from pydantic import BaseModel, Field, root_validator, validator
from requests.exceptions import Timeout
_LOGGER = logging.getLogger(__name__)
BASE_URL = os.getenv("POWERBI_BASE_URL", "https://api.powerbi.com/v1.0/myorg")
if TYPE_CHECKING:
from azure.core.credentials import TokenCredential
[docs]class PowerBIDataset(BaseModel):
"""Create PowerBI engine from dataset ID and credential or token.
Use either the credential or a supplied token to authenticate.
If both are supplied the credential is used to generate a token.
The impersonated_user_name is the UPN of a user to be impersonated.
If the model is not RLS enabled, this will be ignored.
"""
dataset_id: str
table_names: List[str]
group_id: Optional[str] = None
credential: Optional[TokenCredential] = None
token: Optional[str] = None
impersonated_user_name: Optional[str] = None
sample_rows_in_table_info: int = Field(default=1, gt=0, le=10)
schemas: Dict[str, str] = Field(default_factory=dict)
aiosession: Optional[aiohttp.ClientSession] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@validator("table_names", allow_reuse=True)
def fix_table_names(cls, table_names: List[str]) -> List[str]:
"""Fix the table names.""" | https://python.langchain.com/en/latest/_modules/langchain/utilities/powerbi.html |
db7c2c7ac492-1 | """Fix the table names."""
return [fix_table_name(table) for table in table_names]
@root_validator(pre=True, allow_reuse=True)
def token_or_credential_present(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Validate that at least one of token and credentials is present."""
if "token" in values or "credential" in values:
return values
raise ValueError("Please provide either a credential or a token.")
@property
def request_url(self) -> str:
"""Get the request url."""
if self.group_id:
return f"{BASE_URL}/groups/{self.group_id}/datasets/{self.dataset_id}/executeQueries" # noqa: E501 # pylint: disable=C0301
return f"{BASE_URL}/datasets/{self.dataset_id}/executeQueries" # noqa: E501 # pylint: disable=C0301
@property
def headers(self) -> Dict[str, str]:
"""Get the token."""
if self.token:
return {
"Content-Type": "application/json",
"Authorization": "Bearer " + self.token,
}
from azure.core.exceptions import (
ClientAuthenticationError, # pylint: disable=import-outside-toplevel
)
if self.credential:
try:
token = self.credential.get_token(
"https://analysis.windows.net/powerbi/api/.default"
).token
return {
"Content-Type": "application/json",
"Authorization": "Bearer " + token,
}
except Exception as exc: # pylint: disable=broad-exception-caught
raise ClientAuthenticationError(
"Could not get a token from the supplied credentials."
) from exc
raise ClientAuthenticationError("No credential or token supplied.")
[docs] def get_table_names(self) -> Iterable[str]:
"""Get names of tables available."""
return self.table_names
[docs] def get_schemas(self) -> str:
"""Get the available schema's."""
if self.schemas:
return ", ".join([f"{key}: {value}" for key, value in self.schemas.items()])
return "No known schema's yet. Use the schema_powerbi tool first."
@property
def table_info(self) -> str:
"""Information about all tables in the database."""
return self.get_table_info()
def _get_tables_to_query(
self, table_names: Optional[Union[List[str], str]] = None
) -> Optional[List[str]]:
"""Get the tables names that need to be queried, after checking they exist."""
if table_names is not None:
if (
isinstance(table_names, list)
and len(table_names) > 0
and table_names[0] != ""
):
fixed_tables = [fix_table_name(table) for table in table_names]
non_existing_tables = [
table for table in fixed_tables if table not in self.table_names
]
if non_existing_tables:
_LOGGER.warning(
"Table(s) %s not found in dataset.",
", ".join(non_existing_tables),
)
tables = [
table for table in fixed_tables if table not in non_existing_tables
]
return tables if tables else None
if isinstance(table_names, str) and table_names != "":
if table_names not in self.table_names:
_LOGGER.warning("Table %s not found in dataset.", table_names)
return None
return [fix_table_name(table_names)]
return self.table_names
def _get_tables_todo(self, tables_todo: List[str]) -> List[str]:
"""Get the tables that still need to be queried."""
return [table for table in tables_todo if table not in self.schemas]
def _get_schema_for_tables(self, table_names: List[str]) -> str:
"""Create a string of the table schemas for the supplied tables."""
schemas = [
schema for table, schema in self.schemas.items() if table in table_names
]
return ", ".join(schemas)
[docs] def get_table_info(
self, table_names: Optional[Union[List[str], str]] = None
) -> str:
"""Get information about specified tables."""
tables_requested = self._get_tables_to_query(table_names)
if tables_requested is None:
return "No (valid) tables requested."
tables_todo = self._get_tables_todo(tables_requested)
for table in tables_todo:
self._get_schema(table)
return self._get_schema_for_tables(tables_requested)
[docs] async def aget_table_info(
self, table_names: Optional[Union[List[str], str]] = None
) -> str:
"""Get information about specified tables."""
tables_requested = self._get_tables_to_query(table_names)
if tables_requested is None:
return "No (valid) tables requested."
tables_todo = self._get_tables_todo(tables_requested)
await asyncio.gather(*[self._aget_schema(table) for table in tables_todo])
return self._get_schema_for_tables(tables_requested)
def _get_schema(self, table: str) -> None:
"""Get the schema for a table."""
try:
result = self.run(
f"EVALUATE TOPN({self.sample_rows_in_table_info}, {table})"
)
self.schemas[table] = json_to_md(result["results"][0]["tables"][0]["rows"])
except Timeout:
_LOGGER.warning("Timeout while getting table info for %s", table)
self.schemas[table] = "unknown"
except Exception as exc: # pylint: disable=broad-exception-caught
_LOGGER.warning("Error while getting table info for %s: %s", table, exc)
self.schemas[table] = "unknown"
async def _aget_schema(self, table: str) -> None:
"""Get the schema for a table."""
try:
result = await self.arun(
f"EVALUATE TOPN({self.sample_rows_in_table_info}, {table})"
)
self.schemas[table] = json_to_md(result["results"][0]["tables"][0]["rows"])
except ServerTimeoutError:
_LOGGER.warning("Timeout while getting table info for %s", table)
self.schemas[table] = "unknown"
except Exception as exc: # pylint: disable=broad-exception-caught
_LOGGER.warning("Error while getting table info for %s: %s", table, exc)
self.schemas[table] = "unknown" | https://python.langchain.com/en/latest/_modules/langchain/utilities/powerbi.html |
db7c2c7ac492-5 | self.schemas[table] = "unknown"
def _create_json_content(self, command: str) -> dict[str, Any]:
"""Create the json content for the request."""
return {
"queries": [{"query": rf"{command}"}],
"impersonatedUserName": self.impersonated_user_name,
"serializerSettings": {"includeNulls": True},
}
[docs] def run(self, command: str) -> Any:
"""Execute a DAX command and return a json representing the results."""
_LOGGER.debug("Running command: %s", command)
result = requests.post(
self.request_url,
json=self._create_json_content(command),
headers=self.headers,
timeout=10,
)
return result.json()
[docs] async def arun(self, command: str) -> Any:
"""Execute a DAX command and return the result asynchronously."""
_LOGGER.debug("Running command: %s", command)
if self.aiosession:
async with self.aiosession.post(
self.request_url,
headers=self.headers,
json=self._create_json_content(command),
timeout=10,
) as response:
response_json = await response.json()
return response_json
async with aiohttp.ClientSession() as session:
async with session.post(
self.request_url,
headers=self.headers,
json=self._create_json_content(command),
timeout=10,
) as response:
response_json = await response.json()
return response_json
def json_to_md(
json_contents: List[Dict[str, Union[str, int, float]]],
table_name: Optional[str] = None,
) -> str:
"""Converts a JSON object to a markdown table."""
output_md = ""
headers = json_contents[0].keys()
for header in headers:
header.replace("[", ".").replace("]", "")
if table_name:
header.replace(f"{table_name}.", "")
output_md += f"| {header} "
output_md += "|\n"
for row in json_contents:
for value in row.values():
output_md += f"| {value} "
output_md += "|\n"
return output_md
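For illustration, a small sketch of what ``json_to_md`` produces once the header cleanup above is applied (the rows are made up):

.. code-block:: python

    rows = [
        {"Sales[Amount]": 100, "Sales[Region]": "EU"},
        {"Sales[Amount]": 250, "Sales[Region]": "US"},
    ]
    print(json_to_md(rows, table_name="Sales"))
    # | Amount | Region |
    # | 100 | EU |
    # | 250 | US |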
def fix_table_name(table: str) -> str:
"""Add single quotes around table names that contain spaces."""
if " " in table and not table.startswith("'") and not table.endswith("'"):
return f"'{table}'"
return table
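A minimal usage sketch; the dataset ID and bearer token below are placeholders, and a live Power BI REST endpoint is assumed:

.. code-block:: python

    from langchain.utilities.powerbi import PowerBIDataset

    dataset = PowerBIDataset(
        dataset_id="00000000-0000-0000-0000-000000000000",  # placeholder GUID
        table_names=["Sales", "Customer Table"],  # the second name will be quoted
        token="<aad-bearer-token>",               # or pass an Azure TokenCredential
    )
    # fetches one sample row per table and caches the schemas
    print(dataset.get_table_info(["Sales"]))
    # run an arbitrary DAX query against the dataset
    result = dataset.run("EVALUATE TOPN(3, Sales)")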
Source code for langchain.utilities.spark_sql
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Iterable, List, Optional
if TYPE_CHECKING:
from pyspark.sql import DataFrame, Row, SparkSession
[docs]class SparkSQL:
def __init__(
self,
spark_session: Optional[SparkSession] = None,
catalog: Optional[str] = None,
schema: Optional[str] = None,
ignore_tables: Optional[List[str]] = None,
include_tables: Optional[List[str]] = None,
sample_rows_in_table_info: int = 3,
):
try:
from pyspark.sql import SparkSession
except ImportError:
raise ValueError(
"pyspark is not installed. Please install it with `pip install pyspark`"
)
self._spark = (
spark_session if spark_session else SparkSession.builder.getOrCreate()
)
if catalog is not None:
self._spark.catalog.setCurrentCatalog(catalog)
if schema is not None:
self._spark.catalog.setCurrentDatabase(schema)
self._all_tables = set(self._get_all_table_names())
self._include_tables = set(include_tables) if include_tables else set()
if self._include_tables:
missing_tables = self._include_tables - self._all_tables
if missing_tables:
raise ValueError(
f"include_tables {missing_tables} not found in database"
)
self._ignore_tables = set(ignore_tables) if ignore_tables else set()
if self._ignore_tables:
missing_tables = self._ignore_tables - self._all_tables
if missing_tables:
raise ValueError(
f"ignore_tables {missing_tables} not found in database"
)
usable_tables = self.get_usable_table_names()
self._usable_tables = set(usable_tables) if usable_tables else self._all_tables
if not isinstance(sample_rows_in_table_info, int):
raise TypeError("sample_rows_in_table_info must be an integer")
self._sample_rows_in_table_info = sample_rows_in_table_info
[docs] @classmethod
def from_uri(
cls, database_uri: str, engine_args: Optional[dict] = None, **kwargs: Any
) -> SparkSQL:
"""Creating a remote Spark Session via Spark connect.
For example: SparkSQL.from_uri("sc://localhost:15002")
"""
try:
from pyspark.sql import SparkSession
except ImportError:
raise ValueError(
"pyspark is not installed. Please install it with `pip install pyspark`"
)
spark = SparkSession.builder.remote(database_uri).getOrCreate()
return cls(spark, **kwargs)
[docs] def get_usable_table_names(self) -> Iterable[str]:
"""Get names of tables available."""
if self._include_tables:
return self._include_tables
# sorting the result can help LLM understanding it.
return sorted(self._all_tables - self._ignore_tables)
def _get_all_table_names(self) -> Iterable[str]:
rows = self._spark.sql("SHOW TABLES").select("tableName").collect()
return list(map(lambda row: row.tableName, rows))
def _get_create_table_stmt(self, table: str) -> str:
statement = (
self._spark.sql(f"SHOW CREATE TABLE {table}").collect()[0].createtab_stmt | https://python.langchain.com/en/latest/_modules/langchain/utilities/spark_sql.html |
af373735ecba-2 | )
# Ignore the data source provider and options to reduce the number of tokens.
using_clause_index = statement.find("USING")
return statement[:using_clause_index] + ";"
[docs] def get_table_info(self, table_names: Optional[List[str]] = None) -> str:
all_table_names = self.get_usable_table_names()
if table_names is not None:
missing_tables = set(table_names).difference(all_table_names)
if missing_tables:
raise ValueError(f"table_names {missing_tables} not found in database")
all_table_names = table_names
tables = []
for table_name in all_table_names:
table_info = self._get_create_table_stmt(table_name)
if self._sample_rows_in_table_info:
table_info += "\n\n/*"
table_info += f"\n{self._get_sample_spark_rows(table_name)}\n"
table_info += "*/"
tables.append(table_info)
final_str = "\n\n".join(tables)
return final_str
def _get_sample_spark_rows(self, table: str) -> str:
query = f"SELECT * FROM {table} LIMIT {self._sample_rows_in_table_info}"
df = self._spark.sql(query)
columns_str = "\t".join(list(map(lambda f: f.name, df.schema.fields)))
try:
sample_rows = self._get_dataframe_results(df)
# save the sample rows in string format
sample_rows_str = "\n".join(["\t".join(row) for row in sample_rows])
except Exception:
sample_rows_str = ""
return (
f"{self._sample_rows_in_table_info} rows from {table} table:\n" | https://python.langchain.com/en/latest/_modules/langchain/utilities/spark_sql.html |
af373735ecba-3 | f"{columns_str}\n"
f"{sample_rows_str}"
)
def _convert_row_as_tuple(self, row: Row) -> tuple:
return tuple(map(str, row.asDict().values()))
def _get_dataframe_results(self, df: DataFrame) -> list:
return list(map(self._convert_row_as_tuple, df.collect()))
[docs] def run(self, command: str, fetch: str = "all") -> str:
df = self._spark.sql(command)
if fetch == "one":
df = df.limit(1)
return str(self._get_dataframe_results(df))
[docs] def get_table_info_no_throw(self, table_names: Optional[List[str]] = None) -> str:
"""Get information about specified tables.
Follows best practices as specified in: Rajkumar et al, 2022
(https://arxiv.org/abs/2204.00498)
If `sample_rows_in_table_info`, the specified number of sample rows will be
appended to each table description. This can increase performance as
demonstrated in the paper.
"""
try:
return self.get_table_info(table_names)
except ValueError as e:
"""Format the error message"""
return f"Error: {e}"
[docs] def run_no_throw(self, command: str, fetch: str = "all") -> str:
"""Execute a SQL command and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
If the statement throws an error, the error message is returned.
"""
try:
from pyspark.errors import PySparkException
except ImportError:
raise ValueError(
"pyspark is not installed. Please install it with `pip install pyspark`"
)
try:
return self.run(command, fetch)
except PySparkException as e:
"""Format the error message"""
return f"Error: {e}"
Source code for langchain.utilities.arxiv
"""Util that calls Arxiv."""
import logging
import os
from typing import Any, Dict, List
from pydantic import BaseModel, Extra, root_validator
from langchain.schema import Document
logger = logging.getLogger(__name__)
[docs]class ArxivAPIWrapper(BaseModel):
"""Wrapper around ArxivAPI.
To use, you should have the ``arxiv`` python package installed.
https://lukasschwab.me/arxiv.py/index.html
This wrapper will use the Arxiv API to conduct searches and
fetch document summaries. By default, it will return the document summaries
of the top-k results.
It limits the Document content by doc_content_chars_max.
Set doc_content_chars_max=None if you don't want to limit the content size.
Parameters:
top_k_results: number of the top-scored document used for the arxiv tool
ARXIV_MAX_QUERY_LENGTH: the maximum length of the query passed to the arxiv tool.
load_max_docs: a limit to the number of loaded documents
load_all_available_meta:
if True: the `metadata` of the loaded Documents gets all available meta info
(see https://lukasschwab.me/arxiv.py/index.html#Result),
if False: the `metadata` gets only the most informative fields.
"""
arxiv_client: Any #: :meta private:
arxiv_exceptions: Any # :meta private:
top_k_results: int = 3
ARXIV_MAX_QUERY_LENGTH = 300
load_max_docs: int = 100
load_all_available_meta: bool = False
doc_content_chars_max: int = 4000
class Config:
"""Configuration for this pydantic object.""" | https://python.langchain.com/en/latest/_modules/langchain/utilities/arxiv.html |
1a294e778a93-1 | class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
try:
import arxiv
values["arxiv_search"] = arxiv.Search
values["arxiv_exceptions"] = (
arxiv.ArxivError,
arxiv.UnexpectedEmptyPageError,
arxiv.HTTPError,
)
values["arxiv_result"] = arxiv.Result
except ImportError:
raise ImportError(
"Could not import arxiv python package. "
"Please install it with `pip install arxiv`."
)
return values
[docs] def run(self, query: str) -> str:
"""
Run Arxiv search and get the article meta information.
See https://lukasschwab.me/arxiv.py/index.html#Search
See https://lukasschwab.me/arxiv.py/index.html#Result
It uses only the most informative fields of article meta information.
"""
try:
results = self.arxiv_search( # type: ignore
query[: self.ARXIV_MAX_QUERY_LENGTH], max_results=self.top_k_results
).results()
except self.arxiv_exceptions as ex:
return f"Arxiv exception: {ex}"
docs = [
f"Published: {result.updated.date()}\nTitle: {result.title}\n"
f"Authors: {', '.join(a.name for a in result.authors)}\n"
f"Summary: {result.summary}"
for result in results
]
if docs:
return "\n\n".join(docs)[: self.doc_content_chars_max]
else:
return "No good Arxiv Result was found"
[docs] def load(self, query: str) -> List[Document]:
"""
Run Arxiv search and get the article texts plus the article meta information.
See https://lukasschwab.me/arxiv.py/index.html#Search
Returns: a list of documents with the document.page_content in text format
"""
try:
import fitz
except ImportError:
raise ImportError(
"PyMuPDF package not found, please install it with "
"`pip install pymupdf`"
)
try:
results = self.arxiv_search( # type: ignore
query[: self.ARXIV_MAX_QUERY_LENGTH], max_results=self.load_max_docs
).results()
except self.arxiv_exceptions as ex:
logger.debug("Error on arxiv: %s", ex)
return []
docs: List[Document] = []
for result in results:
try:
doc_file_name: str = result.download_pdf()
with fitz.open(doc_file_name) as doc_file:
text: str = "".join(page.get_text() for page in doc_file)
except FileNotFoundError as f_ex:
logger.debug(f_ex)
continue
if self.load_all_available_meta:
extra_metadata = {
"entry_id": result.entry_id,
"published_first_time": str(result.published.date()),
"comment": result.comment,
"journal_ref": result.journal_ref,
"doi": result.doi,
"primary_category": result.primary_category, | https://python.langchain.com/en/latest/_modules/langchain/utilities/arxiv.html |
1a294e778a93-3 | "doi": result.doi,
"primary_category": result.primary_category,
"categories": result.categories,
"links": [link.href for link in result.links],
}
else:
extra_metadata = {}
metadata = {
"Published": str(result.updated.date()),
"Title": result.title,
"Authors": ", ".join(a.name for a in result.authors),
"Summary": result.summary,
**extra_metadata,
}
doc = Document(
page_content=text[: self.doc_content_chars_max], metadata=metadata
)
docs.append(doc)
os.remove(doc_file_name)
return docs
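A minimal usage sketch, assuming the ``arxiv`` package (and ``pymupdf`` for ``load``) is installed:

.. code-block:: python

    from langchain.utilities.arxiv import ArxivAPIWrapper

    arxiv = ArxivAPIWrapper(top_k_results=2, doc_content_chars_max=1000)
    print(arxiv.run("quantum error correction"))   # titles, authors, summaries

    docs = arxiv.load("quantum error correction")  # downloads PDFs, extracts text
    for doc in docs:
        print(doc.metadata["Title"])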
Source code for langchain.utilities.python
import sys
from io import StringIO
from typing import Dict, Optional
from pydantic import BaseModel, Field
[docs]class PythonREPL(BaseModel):
"""Simulates a standalone Python REPL."""
globals: Optional[Dict] = Field(default_factory=dict, alias="_globals")
locals: Optional[Dict] = Field(default_factory=dict, alias="_locals")
[docs] def run(self, command: str) -> str:
"""Run command with own globals/locals and returns anything printed."""
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
try:
exec(command, self.globals, self.locals)
sys.stdout = old_stdout
output = mystdout.getvalue()
except Exception as e:
sys.stdout = old_stdout
output = repr(e)
return output
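A short sketch of the REPL behavior — printed output is captured, and exceptions come back as their ``repr``:

.. code-block:: python

    from langchain.utilities.python import PythonREPL

    repl = PythonREPL()
    print(repl.run("x = 2 ** 10\nprint(x)"))  # -> "1024\n"
    print(repl.run("1 / 0"))                  # -> "ZeroDivisionError('division by zero')"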
Source code for langchain.utilities.wolfram_alpha
"""Util that calls WolframAlpha."""
from typing import Any, Dict, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
[docs]class WolframAlphaAPIWrapper(BaseModel):
"""Wrapper for Wolfram Alpha.
Docs for using:
1. Go to wolfram alpha and sign up for a developer account
2. Create an app and get your APP ID
3. Save your APP ID into WOLFRAM_ALPHA_APPID env variable
4. pip install wolframalpha
"""
wolfram_client: Any #: :meta private:
wolfram_alpha_appid: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
wolfram_alpha_appid = get_from_dict_or_env(
values, "wolfram_alpha_appid", "WOLFRAM_ALPHA_APPID"
)
values["wolfram_alpha_appid"] = wolfram_alpha_appid
try:
import wolframalpha
except ImportError:
raise ImportError(
"wolframalpha is not installed. "
"Please install it with `pip install wolframalpha`"
)
client = wolframalpha.Client(wolfram_alpha_appid)
values["wolfram_client"] = client
return values
[docs] def run(self, query: str) -> str:
"""Run query through WolframAlpha and parse result."""
res = self.wolfram_client.query(query)
try:
assumption = next(res.pods).text
answer = next(res.results).text
except StopIteration:
return "Wolfram Alpha wasn't able to answer it"
if answer is None or answer == "":
# We don't want to return the assumption alone if answer is empty
return "No good Wolfram Alpha Result was found"
else:
return f"Assumption: {assumption} \nAnswer: {answer}"
Source code for langchain.utilities.apify
from typing import Any, Callable, Dict, Optional
from pydantic import BaseModel, root_validator
from langchain.document_loaders import ApifyDatasetLoader
from langchain.document_loaders.base import Document
from langchain.utils import get_from_dict_or_env
[docs]class ApifyWrapper(BaseModel):
"""Wrapper around Apify.
To use, you should have the ``apify-client`` python package installed,
and the environment variable ``APIFY_API_TOKEN`` set with your API key, or pass
`apify_api_token` as a named parameter to the constructor.
"""
apify_client: Any
apify_client_async: Any
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate environment.
Validate that an Apify API token is set and the apify-client
Python package exists in the current environment.
"""
apify_api_token = get_from_dict_or_env(
values, "apify_api_token", "APIFY_API_TOKEN"
)
try:
from apify_client import ApifyClient, ApifyClientAsync
values["apify_client"] = ApifyClient(apify_api_token)
values["apify_client_async"] = ApifyClientAsync(apify_api_token)
except ImportError:
raise ValueError(
"Could not import apify-client Python package. "
"Please install it with `pip install apify-client`."
)
return values
[docs] def call_actor(
self,
actor_id: str,
run_input: Dict,
dataset_mapping_function: Callable[[Dict], Document],
*,
build: Optional[str] = None,
memory_mbytes: Optional[int] = None,
timeout_secs: Optional[int] = None,
) -> ApifyDatasetLoader:
"""Run an Actor on the Apify platform and wait for results to be ready.
Args:
actor_id (str): The ID or name of the Actor on the Apify platform.
run_input (Dict): The input object of the Actor that you're trying to run.
dataset_mapping_function (Callable): A function that takes a single
dictionary (an Apify dataset item) and converts it to an
instance of the Document class.
build (str, optional): Optionally specifies the actor build to run.
It can be either a build tag or build number.
memory_mbytes (int, optional): Optional memory limit for the run,
in megabytes.
timeout_secs (int, optional): Optional timeout for the run, in seconds.
Returns:
ApifyDatasetLoader: A loader that will fetch the records from the
Actor run's default dataset.
"""
actor_call = self.apify_client.actor(actor_id).call(
run_input=run_input,
build=build,
memory_mbytes=memory_mbytes,
timeout_secs=timeout_secs,
)
return ApifyDatasetLoader(
dataset_id=actor_call["defaultDatasetId"],
dataset_mapping_function=dataset_mapping_function,
)
[docs] async def acall_actor(
self,
actor_id: str,
run_input: Dict,
dataset_mapping_function: Callable[[Dict], Document],
*,
build: Optional[str] = None,
memory_mbytes: Optional[int] = None,
timeout_secs: Optional[int] = None,
) -> ApifyDatasetLoader:
"""Run an Actor on the Apify platform and wait for results to be ready.
Args:
actor_id (str): The ID or name of the Actor on the Apify platform.
run_input (Dict): The input object of the Actor that you're trying to run.
dataset_mapping_function (Callable): A function that takes a single
dictionary (an Apify dataset item) and converts it to
an instance of the Document class.
build (str, optional): Optionally specifies the actor build to run.
It can be either a build tag or build number.
memory_mbytes (int, optional): Optional memory limit for the run,
in megabytes.
timeout_secs (int, optional): Optional timeout for the run, in seconds.
Returns:
ApifyDatasetLoader: A loader that will fetch the records from the
Actor run's default dataset.
"""
actor_call = await self.apify_client_async.actor(actor_id).call(
run_input=run_input,
build=build,
memory_mbytes=memory_mbytes,
timeout_secs=timeout_secs,
)
return ApifyDatasetLoader(
dataset_id=actor_call["defaultDatasetId"],
dataset_mapping_function=dataset_mapping_function,
)
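A minimal usage sketch; the Actor ID and its input follow Apify's website-content-crawler but should be treated as illustrative, and ``APIFY_API_TOKEN`` is assumed to be set:

.. code-block:: python

    from langchain.document_loaders.base import Document
    from langchain.utilities.apify import ApifyWrapper

    apify = ApifyWrapper()
    loader = apify.call_actor(
        actor_id="apify/website-content-crawler",
        run_input={"startUrls": [{"url": "https://example.com"}]},
        dataset_mapping_function=lambda item: Document(
            page_content=item.get("text", ""),
            metadata={"source": item.get("url", "")},
        ),
    )
    docs = loader.load()  # blocks until the Actor run finishes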
Source code for langchain.utilities.twilio
"""Util that calls Twilio."""
from typing import Any, Dict, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
[docs]class TwilioAPIWrapper(BaseModel):
"""Sms Client using Twilio.
To use, you should have the ``twilio`` python package installed,
and the environment variables ``TWILIO_ACCOUNT_SID``, ``TWILIO_AUTH_TOKEN``, and
``TWILIO_FROM_NUMBER``, or pass `account_sid`, `auth_token`, and `from_number` as
named parameters to the constructor.
Example:
.. code-block:: python
from langchain.utilities.twilio import TwilioAPIWrapper
twilio = TwilioAPIWrapper(
account_sid="ACxxx",
auth_token="xxx",
from_number="+10123456789"
)
twilio.run('test', '+12484345508')
"""
client: Any #: :meta private:
account_sid: Optional[str] = None
"""Twilio account string identifier."""
auth_token: Optional[str] = None
"""Twilio auth token."""
from_number: Optional[str] = None
"""A Twilio phone number in [E.164](https://www.twilio.com/docs/glossary/what-e164)
format, an
[alphanumeric sender ID](https://www.twilio.com/docs/sms/send-messages#use-an-alphanumeric-sender-id),
or a [Channel Endpoint address](https://www.twilio.com/docs/sms/channels#channel-addresses)
that is enabled for the type of message you want to send. Phone numbers or
[short codes](https://www.twilio.com/docs/sms/api/short-code) purchased from
Twilio also work here. You cannot, for example, spoof messages from a private
cell phone number. If you are using `messaging_service_sid`, this parameter
must be empty.
""" # noqa: E501
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = False
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
try:
from twilio.rest import Client
except ImportError:
raise ImportError(
"Could not import twilio python package. "
"Please install it with `pip install twilio`."
)
account_sid = get_from_dict_or_env(values, "account_sid", "TWILIO_ACCOUNT_SID")
auth_token = get_from_dict_or_env(values, "auth_token", "TWILIO_AUTH_TOKEN")
values["from_number"] = get_from_dict_or_env(
values, "from_number", "TWILIO_FROM_NUMBER"
)
values["client"] = Client(account_sid, auth_token)
return values
[docs] def run(self, body: str, to: str) -> str:
"""Run body through Twilio and respond with message sid.
Args:
body: The text of the message you want to send. Can be up to 1,600
characters in length.
to: The destination phone number in
[E.164](https://www.twilio.com/docs/glossary/what-e164) format for
SMS/MMS or
[Channel user address](https://www.twilio.com/docs/sms/channels#channel-addresses)
for other 3rd-party channels.
""" # noqa: E501
message = self.client.messages.create(to, from_=self.from_number, body=body)
return message.sid
Source code for langchain.utilities.bash
"""Wrapper around subprocess to run commands."""
from __future__ import annotations
import platform
import re
import subprocess
from typing import TYPE_CHECKING, List, Union
from uuid import uuid4
if TYPE_CHECKING:
import pexpect
def _lazy_import_pexpect() -> pexpect:
"""Import pexpect only when needed."""
if platform.system() == "Windows":
raise ValueError("Persistent bash processes are not yet supported on Windows.")
try:
import pexpect
except ImportError:
raise ImportError(
"pexpect required for persistent bash processes."
" To install, run `pip install pexpect`."
)
return pexpect
[docs]class BashProcess:
"""Executes bash commands and returns the output."""
def __init__(
self,
strip_newlines: bool = False,
return_err_output: bool = False,
persistent: bool = False,
):
"""Initialize with stripping newlines."""
self.strip_newlines = strip_newlines
self.return_err_output = return_err_output
self.prompt = ""
self.process = None
if persistent:
self.prompt = str(uuid4())
self.process = self._initialize_persistent_process(self.prompt)
@staticmethod
def _initialize_persistent_process(prompt: str) -> pexpect.spawn:
# Start bash in a clean environment
# Doesn't work on windows
pexpect = _lazy_import_pexpect()
process = pexpect.spawn(
"env", ["-i", "bash", "--norc", "--noprofile"], encoding="utf-8"
)
# Set the custom prompt
process.sendline("PS1=" + prompt) | https://python.langchain.com/en/latest/_modules/langchain/utilities/bash.html |
7c54b0a5b210-1 | # Set the custom prompt
process.sendline("PS1=" + prompt)
process.expect_exact(prompt, timeout=10)
return process
[docs] def run(self, commands: Union[str, List[str]]) -> str:
"""Run commands and return final output."""
if isinstance(commands, str):
commands = [commands]
commands = ";".join(commands)
if self.process is not None:
return self._run_persistent(
commands,
)
else:
return self._run(commands)
def _run(self, command: str) -> str:
"""Run commands and return final output."""
try:
output = subprocess.run(
command,
shell=True,
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
).stdout.decode()
except subprocess.CalledProcessError as error:
if self.return_err_output:
return error.stdout.decode()
return str(error)
if self.strip_newlines:
output = output.strip()
return output
[docs] def process_output(self, output: str, command: str) -> str:
# Remove the command from the output using a regular expression
pattern = re.escape(command) + r"\s*\n"
output = re.sub(pattern, "", output, count=1)
return output.strip()
def _run_persistent(self, command: str) -> str:
"""Run commands and return final output."""
pexpect = _lazy_import_pexpect()
if self.process is None:
raise ValueError("Process not initialized")
self.process.sendline(command)
# Clear the output with an empty string
self.process.expect(self.prompt, timeout=10)
self.process.sendline("")
try:
self.process.expect([self.prompt, pexpect.EOF], timeout=10)
except pexpect.TIMEOUT:
return f"Timeout error while executing command {command}"
if self.process.after == pexpect.EOF:
return f"Exited with error status: {self.process.exitstatus}"
output = self.process.before
output = self.process_output(output, command)
if self.strip_newlines:
return output.strip()
return output
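A short sketch of both modes; persistent mode needs ``pexpect`` and a POSIX system:

.. code-block:: python

    from langchain.utilities.bash import BashProcess

    bash = BashProcess(strip_newlines=True)
    print(bash.run("echo hello"))   # one-off subprocess call -> "hello"

    session = BashProcess(persistent=True)
    session.run("cd /tmp")          # state carries over between calls
    print(session.run("pwd"))       # -> "/tmp"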
Source code for langchain.utilities.wikipedia
"""Util that calls Wikipedia."""
import logging
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.schema import Document
logger = logging.getLogger(__name__)
WIKIPEDIA_MAX_QUERY_LENGTH = 300
[docs]class WikipediaAPIWrapper(BaseModel):
"""Wrapper around WikipediaAPI.
To use, you should have the ``wikipedia`` python package installed.
This wrapper will use the Wikipedia API to conduct searches and
fetch page summaries. By default, it will return the page summaries
of the top-k results.
It limits the Document content by doc_content_chars_max.
"""
wiki_client: Any #: :meta private:
top_k_results: int = 3
lang: str = "en"
load_all_available_meta: bool = False
doc_content_chars_max: int = 4000
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
try:
import wikipedia
wikipedia.set_lang(values["lang"])
values["wiki_client"] = wikipedia
except ImportError:
raise ImportError(
"Could not import wikipedia python package. "
"Please install it with `pip install wikipedia`."
)
return values
[docs] def run(self, query: str) -> str:
"""Run Wikipedia search and get page summaries."""
page_titles = self.wiki_client.search(query[:WIKIPEDIA_MAX_QUERY_LENGTH])
summaries = []
for page_title in page_titles[: self.top_k_results]:
if wiki_page := self._fetch_page(page_title):
if summary := self._formatted_page_summary(page_title, wiki_page):
summaries.append(summary)
if not summaries:
return "No good Wikipedia Search Result was found"
return "\n\n".join(summaries)[: self.doc_content_chars_max]
@staticmethod
def _formatted_page_summary(page_title: str, wiki_page: Any) -> Optional[str]:
return f"Page: {page_title}\nSummary: {wiki_page.summary}"
def _page_to_document(self, page_title: str, wiki_page: Any) -> Document:
main_meta = {
"title": page_title,
"summary": wiki_page.summary,
"source": wiki_page.url,
}
add_meta = (
{
"categories": wiki_page.categories,
"page_url": wiki_page.url,
"image_urls": wiki_page.images,
"related_titles": wiki_page.links,
"parent_id": wiki_page.parent_id,
"references": wiki_page.references,
"revision_id": wiki_page.revision_id,
"sections": wiki_page.sections,
}
if self.load_all_available_meta
else {}
)
doc = Document(
page_content=wiki_page.content[: self.doc_content_chars_max],
metadata={
**main_meta,
**add_meta,
},
)
return doc
def _fetch_page(self, page: str) -> Optional[str]:
try:
return self.wiki_client.page(title=page, auto_suggest=False)
except (
self.wiki_client.exceptions.PageError,
self.wiki_client.exceptions.DisambiguationError,
):
return None
[docs] def load(self, query: str) -> List[Document]:
"""
Run Wikipedia search and get the article text plus the meta information.
Returns: a list of documents.
"""
page_titles = self.wiki_client.search(query[:WIKIPEDIA_MAX_QUERY_LENGTH])
docs = []
for page_title in page_titles[: self.top_k_results]:
if wiki_page := self._fetch_page(page_title):
if doc := self._page_to_document(page_title, wiki_page):
docs.append(doc)
return docs
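A minimal usage sketch, assuming the ``wikipedia`` package is installed:

.. code-block:: python

    from langchain.utilities.wikipedia import WikipediaAPIWrapper

    wiki = WikipediaAPIWrapper(top_k_results=2, doc_content_chars_max=2000)
    print(wiki.run("Alan Turing"))    # concatenated page summaries

    docs = wiki.load("Alan Turing")   # full page content as Document objects
    for doc in docs:
        print(doc.metadata["title"], doc.metadata["source"])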
Source code for langchain.utilities.graphql
import json
from typing import Any, Callable, Dict, Optional
from pydantic import BaseModel, Extra, root_validator
[docs]class GraphQLAPIWrapper(BaseModel):
"""Wrapper around GraphQL API.
To use, you should have the ``gql`` python package installed.
This wrapper will use the GraphQL API to conduct queries.
"""
custom_headers: Optional[Dict[str, str]] = None
graphql_endpoint: str
gql_client: Any #: :meta private:
gql_function: Callable[[str], Any] #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in the environment."""
try:
from gql import Client, gql
from gql.transport.requests import RequestsHTTPTransport
except ImportError as e:
raise ImportError(
"Could not import gql python package. "
f"Try installing it with `pip install gql`. Received error: {e}"
)
headers = values.get("custom_headers")
transport = RequestsHTTPTransport(
url=values["graphql_endpoint"],
headers=headers,
)
client = Client(transport=transport, fetch_schema_from_transport=True)
values["gql_client"] = client
values["gql_function"] = gql
return values
[docs] def run(self, query: str) -> str:
"""Run a GraphQL query and get the results."""
result = self._execute_query(query)
return json.dumps(result, indent=2)
def _execute_query(self, query: str) -> Dict[str, Any]:
"""Execute a GraphQL query and return the results."""
document_node = self.gql_function(query)
result = self.gql_client.execute(document_node)
return result
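A minimal usage sketch; the endpoint is a placeholder, and it must support schema introspection since the client is created with ``fetch_schema_from_transport=True``:

.. code-block:: python

    from langchain.utilities.graphql import GraphQLAPIWrapper

    wrapper = GraphQLAPIWrapper(
        graphql_endpoint="https://api.example.com/graphql",  # placeholder
        custom_headers={"Authorization": "Bearer <token>"},  # optional
    )
    print(wrapper.run("{ __schema { queryType { name } } }"))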
Source code for langchain.utilities.duckduckgo_search
"""Util that calls DuckDuckGo Search.
No setup required. Free.
https://pypi.org/project/duckduckgo-search/
"""
from typing import Dict, List, Optional
from pydantic import BaseModel, Extra
from pydantic.class_validators import root_validator
[docs]class DuckDuckGoSearchAPIWrapper(BaseModel):
"""Wrapper for DuckDuckGo Search API.
Free and does not require any setup
"""
k: int = 10
region: Optional[str] = "wt-wt"
safesearch: str = "moderate"
time: Optional[str] = "y"
max_results: int = 5
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that python package exists in environment."""
try:
from duckduckgo_search import ddg # noqa: F401
except ImportError:
raise ValueError(
"Could not import duckduckgo-search python package. "
"Please install it with `pip install duckduckgo-search`."
)
return values
[docs] def get_snippets(self, query: str) -> List[str]:
"""Run query through DuckDuckGo and return concatenated results."""
from duckduckgo_search import ddg
results = ddg(
query,
region=self.region,
safesearch=self.safesearch,
time=self.time,
max_results=self.max_results,
)
if results is None or len(results) == 0:
return ["No good DuckDuckGo Search Result was found"]
snippets = [result["body"] for result in results]
return snippets
[docs] def run(self, query: str) -> str:
snippets = self.get_snippets(query)
return " ".join(snippets)
[docs] def results(self, query: str, num_results: int) -> List[Dict[str, str]]:
"""Run query through DuckDuckGo and return metadata.
Args:
query: The query to search for.
num_results: The number of results to return.
Returns:
A list of dictionaries with the following keys:
snippet - The description of the result.
title - The title of the result.
link - The link to the result.
"""
from duckduckgo_search import ddg
results = ddg(
query,
region=self.region,
safesearch=self.safesearch,
time=self.time,
max_results=num_results,
)
if results is None or len(results) == 0:
return [{"Result": "No good DuckDuckGo Search Result was found"}]
def to_metadata(result: Dict) -> Dict[str, str]:
return {
"snippet": result["body"],
"title": result["title"],
"link": result["href"],
}
return [to_metadata(result) for result in results] | https://python.langchain.com/en/latest/_modules/langchain/utilities/duckduckgo_search.html |
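A short usage sketch of the wrapper above; no API key or other setup is required:
.. code-block:: python

from langchain.utilities import DuckDuckGoSearchAPIWrapper

search = DuckDuckGoSearchAPIWrapper(region="us-en", max_results=3)
print(search.run("python dataclasses"))  # snippets joined into one string
for item in search.results("python dataclasses", num_results=3):
    print(item["title"], "->", item["link"])  # structured metadata per result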
ed186d59df6c-0 | Source code for langchain.utilities.google_serper
"""Util that calls Google Search using the Serper.dev API."""
from typing import Any, Dict, List, Optional
import aiohttp
import requests
from pydantic.class_validators import root_validator
from pydantic.main import BaseModel
from typing_extensions import Literal
from langchain.utils import get_from_dict_or_env
[docs]class GoogleSerperAPIWrapper(BaseModel):
"""Wrapper around the Serper.dev Google Search API.
You can create a free API key at https://serper.dev.
To use, you should have the environment variable ``SERPER_API_KEY``
set with your API key, or pass `serper_api_key` as a named parameter
to the constructor.
Example:
.. code-block:: python
from langchain import GoogleSerperAPIWrapper
google_serper = GoogleSerperAPIWrapper()
"""
k: int = 10
gl: str = "us"
hl: str = "en"
# "places" and "images" is available from Serper but not implemented in the
# parser of run(). They can be used in results()
type: Literal["news", "search", "places", "images"] = "search"
result_key_for_type = {
"news": "news",
"places": "places",
"images": "images",
"search": "organic",
}
tbs: Optional[str] = None
serper_api_key: Optional[str] = None
aiosession: Optional[aiohttp.ClientSession] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@root_validator() | https://python.langchain.com/en/latest/_modules/langchain/utilities/google_serper.html |
ed186d59df6c-1 | def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
serper_api_key = get_from_dict_or_env(
values, "serper_api_key", "SERPER_API_KEY"
)
values["serper_api_key"] = serper_api_key
return values
[docs] def results(self, query: str, **kwargs: Any) -> Dict:
"""Run query through GoogleSearch."""
return self._google_serper_api_results(
query,
gl=self.gl,
hl=self.hl,
num=self.k,
tbs=self.tbs,
search_type=self.type,
**kwargs,
)
[docs] def run(self, query: str, **kwargs: Any) -> str:
"""Run query through GoogleSearch and parse result."""
results = self._google_serper_api_results(
query,
gl=self.gl,
hl=self.hl,
num=self.k,
tbs=self.tbs,
search_type=self.type,
**kwargs,
)
return self._parse_results(results)
[docs] async def aresults(self, query: str, **kwargs: Any) -> Dict:
"""Run query through GoogleSearch."""
results = await self._async_google_serper_search_results(
query,
gl=self.gl,
hl=self.hl,
num=self.k,
search_type=self.type,
tbs=self.tbs,
**kwargs,
)
return results
[docs] async def arun(self, query: str, **kwargs: Any) -> str: | https://python.langchain.com/en/latest/_modules/langchain/utilities/google_serper.html |
ed186d59df6c-2 | """Run query through GoogleSearch and parse result async."""
results = await self._async_google_serper_search_results(
query,
gl=self.gl,
hl=self.hl,
num=self.k,
search_type=self.type,
tbs=self.tbs,
**kwargs,
)
return self._parse_results(results)
def _parse_snippets(self, results: dict) -> List[str]:
snippets = []
if results.get("answerBox"):
answer_box = results.get("answerBox", {})
if answer_box.get("answer"):
return [answer_box.get("answer")]
elif answer_box.get("snippet"):
return [answer_box.get("snippet").replace("\n", " ")]
elif answer_box.get("snippetHighlighted"):
return answer_box.get("snippetHighlighted")
if results.get("knowledgeGraph"):
kg = results.get("knowledgeGraph", {})
title = kg.get("title")
entity_type = kg.get("type")
if entity_type:
snippets.append(f"{title}: {entity_type}.")
description = kg.get("description")
if description:
snippets.append(description)
for attribute, value in kg.get("attributes", {}).items():
snippets.append(f"{title} {attribute}: {value}.")
for result in results[self.result_key_for_type[self.type]][: self.k]:
if "snippet" in result:
snippets.append(result["snippet"])
for attribute, value in result.get("attributes", {}).items():
snippets.append(f"{attribute}: {value}.")
if len(snippets) == 0:
return ["No good Google Search Result was found"]
return snippets | https://python.langchain.com/en/latest/_modules/langchain/utilities/google_serper.html |
ed186d59df6c-3 | def _parse_results(self, results: dict) -> str:
return " ".join(self._parse_snippets(results))
def _google_serper_api_results(
self, search_term: str, search_type: str = "search", **kwargs: Any
) -> dict:
headers = {
"X-API-KEY": self.serper_api_key or "",
"Content-Type": "application/json",
}
params = {
"q": search_term,
**{key: value for key, value in kwargs.items() if value is not None},
}
response = requests.post(
f"https://google.serper.dev/{search_type}", headers=headers, params=params
)
response.raise_for_status()
search_results = response.json()
return search_results
async def _async_google_serper_search_results(
self, search_term: str, search_type: str = "search", **kwargs: Any
) -> dict:
headers = {
"X-API-KEY": self.serper_api_key or "",
"Content-Type": "application/json",
}
url = f"https://google.serper.dev/{search_type}"
params = {
"q": search_term,
**{key: value for key, value in kwargs.items() if value is not None},
}
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.post(
url, params=params, headers=headers, raise_for_status=False
) as response:
search_results = await response.json()
else:
async with self.aiosession.post( | https://python.langchain.com/en/latest/_modules/langchain/utilities/google_serper.html |
ed186d59df6c-4 | url, params=params, headers=headers, raise_for_status=True
) as response:
search_results = await response.json()
return search_results | https://python.langchain.com/en/latest/_modules/langchain/utilities/google_serper.html |
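A usage sketch, assuming ``SERPER_API_KEY`` is set; setting ``type="news"`` makes ``run()`` parse the ``news`` key per ``result_key_for_type`` above:
.. code-block:: python

from langchain import GoogleSerperAPIWrapper

serper = GoogleSerperAPIWrapper(type="news", k=5)
print(serper.run("large language models"))     # parsed snippets from the "news" key
raw = serper.results("large language models")  # full JSON payload for custom parsing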
9a2732ef9382-0 | Source code for langchain.chat_models.openai
"""OpenAI chat wrapper."""
from __future__ import annotations
import logging
import sys
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Mapping,
Optional,
Tuple,
Union,
)
from pydantic import Extra, Field, root_validator
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
ChatMessage,
ChatResult,
HumanMessage,
SystemMessage,
)
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
import tiktoken
logger = logging.getLogger(__name__)
def _import_tiktoken() -> Any:
try:
import tiktoken
except ImportError:
raise ValueError(
"Could not import tiktoken python package. "
"This is needed in order to calculate get_token_ids. "
"Please install it with `pip install tiktoken`."
)
return tiktoken
def _create_retry_decorator(llm: ChatOpenAI) -> Callable[[Any], Any]:
import openai
min_seconds = 1
max_seconds = 60
# Wait 2^x * 1 second between each retry, bounded between
# min_seconds and max_seconds
return retry( | https://python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
9a2732ef9382-1 | reraise=True,
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
async def acompletion_with_retry(llm: ChatOpenAI, **kwargs: Any) -> Any:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
# Use OpenAI's async api https://github.com/openai/openai-python#async-api
return await llm.client.acreate(**kwargs)
return await _completion_with_retry(**kwargs)
def _convert_dict_to_message(_dict: dict) -> BaseMessage:
role = _dict["role"]
if role == "user":
return HumanMessage(content=_dict["content"])
elif role == "assistant":
return AIMessage(content=_dict["content"])
elif role == "system":
return SystemMessage(content=_dict["content"])
else:
return ChatMessage(content=_dict["content"], role=role)
def _convert_message_to_dict(message: BaseMessage) -> dict:
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage): | https://python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
9a2732ef9382-2 | message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
else:
raise ValueError(f"Got unknown type {message}")
if "name" in message.additional_kwargs:
message_dict["name"] = message.additional_kwargs["name"]
return message_dict
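A quick sketch of the role mapping implemented by the two converters above:
.. code-block:: python

from langchain.schema import HumanMessage

msg = HumanMessage(content="Hello!", additional_kwargs={"name": "alice"})
_convert_message_to_dict(msg)
# -> {"role": "user", "content": "Hello!", "name": "alice"}
_convert_dict_to_message({"role": "assistant", "content": "Hi!"})
# -> AIMessage(content="Hi!"); unknown roles fall back to ChatMessage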
[docs]class ChatOpenAI(BaseChatModel):
"""Wrapper around OpenAI Chat large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.chat_models import ChatOpenAI
openai = ChatOpenAI(model_name="gpt-3.5-turbo")
"""
client: Any #: :meta private:
model_name: str = Field(default="gpt-3.5-turbo", alias="model")
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
openai_api_key: Optional[str] = None
9a2732ef9382-3 | openai_api_base: Optional[str] = None
"""Base URL path for API requests,
leave blank if not using a proxy or service emulator."""
openai_organization: Optional[str] = None
# to support explicit proxy for OpenAI
openai_proxy: Optional[str] = None
request_timeout: Optional[Union[float, Tuple[float, float]]] = None
"""Timeout for requests to OpenAI completion API. Default is 600 seconds."""
max_retries: int = 6
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""
n: int = 1
"""Number of chat completions to generate for each prompt."""
max_tokens: Optional[int] = None
"""Maximum number of tokens to generate."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.ignore
allow_population_by_field_name = True
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = cls.all_required_field_names()
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs: | https://python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
9a2732ef9382-4 | raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
openai_organization = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
openai_api_base = get_from_dict_or_env(
values,
"openai_api_base",
"OPENAI_API_BASE",
default="",
)
openai_proxy = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
try:
import openai
except ImportError:
raise ValueError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
openai.api_key = openai_api_key
if openai_organization:
openai.organization = openai_organization
if openai_api_base:
openai.api_base = openai_api_base
if openai_proxy:
openai.proxy = {"http": openai_proxy, "https": openai_proxy} # type: ignore[assignment] # noqa: E501
try:
values["client"] = openai.ChatCompletion
except AttributeError: | https://python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
9a2732ef9382-5 | raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1 and values["streaming"]:
raise ValueError("n must be 1 when streaming.")
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return {
"model": self.model_name,
"request_timeout": self.request_timeout,
"max_tokens": self.max_tokens,
"stream": self.streaming,
"n": self.n,
"temperature": self.temperature,
**self.model_kwargs,
}
def _create_retry_decorator(self) -> Callable[[Any], Any]:
import openai
min_seconds = 1
max_seconds = 60
# Wait 2^x * 1 second between each retry, bounded between
# min_seconds and max_seconds
return retry(
reraise=True,
stop=stop_after_attempt(self.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError) | https://python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
9a2732ef9382-6 | | retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
[docs] def completion_with_retry(self, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = self._create_retry_decorator()
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return self.client.create(**kwargs)
return _completion_with_retry(**kwargs)
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
overall_token_usage: dict = {}
for output in llm_outputs:
if output is None:
# Happens in streaming
continue
token_usage = output["token_usage"]
for k, v in token_usage.items():
if k in overall_token_usage:
overall_token_usage[k] += v
else:
overall_token_usage[k] = v
return {"token_usage": overall_token_usage, "model_name": self.model_name}
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> ChatResult:
message_dicts, params = self._create_message_dicts(messages, stop)
if self.streaming:
inner_completion = ""
role = "assistant"
params["stream"] = True
for stream_resp in self.completion_with_retry(
messages=message_dicts, **params
): | https://python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
9a2732ef9382-7 | role = stream_resp["choices"][0]["delta"].get("role", role)
token = stream_resp["choices"][0]["delta"].get("content", "")
inner_completion += token
if run_manager:
run_manager.on_llm_new_token(token)
message = _convert_dict_to_message(
{"content": inner_completion, "role": role}
)
return ChatResult(generations=[ChatGeneration(message=message)])
response = self.completion_with_retry(messages=message_dicts, **params)
return self._create_chat_result(response)
def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params: Dict[str, Any] = {**{"model": self.model_name}, **self._default_params}
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
message_dicts = [_convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
generations = []
for res in response["choices"]:
message = _convert_dict_to_message(res["message"])
gen = ChatGeneration(message=message)
generations.append(gen)
llm_output = {"token_usage": response["usage"], "model_name": self.model_name}
return ChatResult(generations=generations, llm_output=llm_output)
async def _agenerate(
self,
messages: List[BaseMessage], | https://python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
9a2732ef9382-8 | stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
) -> ChatResult:
message_dicts, params = self._create_message_dicts(messages, stop)
if self.streaming:
inner_completion = ""
role = "assistant"
params["stream"] = True
async for stream_resp in await acompletion_with_retry(
self, messages=message_dicts, **params
):
role = stream_resp["choices"][0]["delta"].get("role", role)
token = stream_resp["choices"][0]["delta"].get("content", "")
inner_completion += token
if run_manager:
await run_manager.on_llm_new_token(token)
message = _convert_dict_to_message(
{"content": inner_completion, "role": role}
)
return ChatResult(generations=[ChatGeneration(message=message)])
else:
response = await acompletion_with_retry(
self, messages=message_dicts, **params
)
return self._create_chat_result(response)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "openai-chat"
def _get_encoding_model(self) -> Tuple[str, tiktoken.Encoding]:
tiktoken_ = _import_tiktoken()
model = self.model_name
if model == "gpt-3.5-turbo": | https://python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
9a2732ef9382-9 | # gpt-3.5-turbo may change over time.
# Returning num tokens assuming gpt-3.5-turbo-0301.
model = "gpt-3.5-turbo-0301"
elif model == "gpt-4":
# gpt-4 may change over time.
# Returning num tokens assuming gpt-4-0314.
model = "gpt-4-0314"
# Look up the tiktoken encoding for the resolved model name.
try:
encoding = tiktoken_.encoding_for_model(model)
except KeyError:
logger.warning("Warning: model not found. Using cl100k_base encoding.")
model = "cl100k_base"
encoding = tiktoken_.get_encoding(model)
return model, encoding
[docs] def get_token_ids(self, text: str) -> List[int]:
"""Get the tokens present in the text with tiktoken package."""
# tiktoken NOT supported for Python 3.7 or below
if sys.version_info[1] <= 7:
return super().get_token_ids(text)
_, encoding_model = self._get_encoding_model()
return encoding_model.encode(text)
[docs] def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
"""Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package.
Official documentation: https://github.com/openai/openai-cookbook/blob/
main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
if sys.version_info[1] <= 7: | https://python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
9a2732ef9382-10 | return super().get_num_tokens_from_messages(messages)
model, encoding = self._get_encoding_model()
if model == "gpt-3.5-turbo-0301":
# every message follows <im_start>{role/name}\n{content}<im_end>\n
tokens_per_message = 4
# if there's a name, the role is omitted
tokens_per_name = -1
elif model == "gpt-4-0314":
tokens_per_message = 3
tokens_per_name = 1
else:
raise NotImplementedError(
f"get_num_tokens_from_messages() is not presently implemented "
f"for model {model}."
"See https://github.com/openai/openai-python/blob/main/chatml.md for "
"information on how messages are converted to tokens."
)
num_tokens = 0
messages_dict = [_convert_message_to_dict(m) for m in messages]
for message in messages_dict:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
# every reply is primed with <im_start>assistant
num_tokens += 3
return num_tokens | https://python.langchain.com/en/latest/_modules/langchain/chat_models/openai.html |
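A usage sketch tying the pieces above together, assuming ``OPENAI_API_KEY`` is set:
.. code-block:: python

from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage

chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, max_retries=2)
messages = [
    SystemMessage(content="You are a terse assistant."),
    HumanMessage(content="Name three prime numbers."),
]
print(chat(messages).content)
# Token accounting via tiktoken, as implemented above:
print(chat.get_num_tokens_from_messages(messages))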
42d61824ded2-0 | Source code for langchain.chat_models.azure_openai
"""Azure OpenAI chat wrapper."""
from __future__ import annotations
import logging
from typing import Any, Dict, Mapping
from pydantic import root_validator
from langchain.chat_models.openai import ChatOpenAI
from langchain.schema import ChatResult
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
[docs]class AzureChatOpenAI(ChatOpenAI):
"""Wrapper around Azure OpenAI Chat Completion API. To use this class you
must have a deployed model on Azure OpenAI. Use `deployment_name` in the
constructor to refer to the "Model deployment name" in the Azure portal.
In addition, you should have the ``openai`` python package installed, and the
following environment variables set or passed in constructor in lower case:
- ``OPENAI_API_TYPE`` (default: ``azure``)
- ``OPENAI_API_KEY``
- ``OPENAI_API_BASE``
- ``OPENAI_API_VERSION``
- ``OPENAI_PROXY``
For example, if you have `gpt-35-turbo` deployed, with the deployment name
`35-turbo-dev`, the constructor should look like:
.. code-block:: python
AzureChatOpenAI(
deployment_name="35-turbo-dev",
openai_api_version="2023-03-15-preview",
)
Be aware the API version may change.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
"""
deployment_name: str = ""
openai_api_type: str = "azure"
openai_api_base: str = "" | https://python.langchain.com/en/latest/_modules/langchain/chat_models/azure_openai.html |
42d61824ded2-1 | openai_api_version: str = ""
openai_api_key: str = ""
openai_organization: str = ""
openai_proxy: str = ""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values,
"openai_api_key",
"OPENAI_API_KEY",
)
openai_api_base = get_from_dict_or_env(
values,
"openai_api_base",
"OPENAI_API_BASE",
)
openai_api_version = get_from_dict_or_env(
values,
"openai_api_version",
"OPENAI_API_VERSION",
)
openai_api_type = get_from_dict_or_env(
values,
"openai_api_type",
"OPENAI_API_TYPE",
)
openai_organization = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
openai_proxy = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
try:
import openai
openai.api_type = openai_api_type
openai.api_base = openai_api_base
openai.api_version = openai_api_version
openai.api_key = openai_api_key
if openai_organization:
openai.organization = openai_organization
if openai_proxy: | https://python.langchain.com/en/latest/_modules/langchain/chat_models/azure_openai.html |
42d61824ded2-2 | openai.proxy = {"http": openai_proxy, "https": openai_proxy} # type: ignore[assignment] # noqa: E501
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
try:
values["client"] = openai.ChatCompletion
except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1 and values["streaming"]:
raise ValueError("n must be 1 when streaming.")
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return {
**super()._default_params,
"engine": self.deployment_name,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**self._default_params}
@property
def _llm_type(self) -> str:
return "azure-openai-chat"
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
for res in response["choices"]:
if res.get("finish_reason", None) == "content_filter":
raise ValueError( | https://python.langchain.com/en/latest/_modules/langchain/chat_models/azure_openai.html |
42d61824ded2-3 | "Azure has not provided the response due to a content"
" filter being triggered"
)
return super()._create_chat_result(response) | https://python.langchain.com/en/latest/_modules/langchain/chat_models/azure_openai.html |
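A construction sketch; the resource URL, key, and deployment name below are placeholders for your own Azure OpenAI resources:
.. code-block:: python

from langchain.chat_models import AzureChatOpenAI
from langchain.schema import HumanMessage

chat = AzureChatOpenAI(
    deployment_name="35-turbo-dev",  # "Model deployment name" in the Azure portal
    openai_api_base="https://example-resource.openai.azure.com/",
    openai_api_version="2023-03-15-preview",
    openai_api_key="...",  # or set OPENAI_API_KEY in the environment
)
print(chat([HumanMessage(content="ping")]).content)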
3939b24bcde2-0 | Source code for langchain.chat_models.google_palm
"""Wrapper around Google's PaLM Chat API."""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional
from pydantic import BaseModel, root_validator
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
ChatMessage,
ChatResult,
HumanMessage,
SystemMessage,
)
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
import google.generativeai as genai
logger = logging.getLogger(__name__)
class ChatGooglePalmError(Exception):
pass
def _truncate_at_stop_tokens(
text: str,
stop: Optional[List[str]],
) -> str:
"""Truncates text at the earliest stop token found."""
if stop is None:
return text
for stop_token in stop:
stop_token_idx = text.find(stop_token)
if stop_token_idx != -1:
text = text[:stop_token_idx]
return text
def _response_to_result(
response: genai.types.ChatResponse,
stop: Optional[List[str]],
) -> ChatResult:
"""Converts a PaLM API response into a LangChain ChatResult."""
if not response.candidates:
raise ChatGooglePalmError("ChatResponse must have at least one candidate.") | https://python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
3939b24bcde2-1 | generations: List[ChatGeneration] = []
for candidate in response.candidates:
author = candidate.get("author")
if author is None:
raise ChatGooglePalmError(f"ChatResponse must have an author: {candidate}")
content = _truncate_at_stop_tokens(candidate.get("content", ""), stop)
if content is None:
raise ChatGooglePalmError(f"ChatResponse must have a content: {candidate}")
if author == "ai":
generations.append(
ChatGeneration(text=content, message=AIMessage(content=content))
)
elif author == "human":
generations.append(
ChatGeneration(
text=content,
message=HumanMessage(content=content),
)
)
else:
generations.append(
ChatGeneration(
text=content,
message=ChatMessage(role=author, content=content),
)
)
return ChatResult(generations=generations)
def _messages_to_prompt_dict(
input_messages: List[BaseMessage],
) -> genai.types.MessagePromptDict:
"""Converts a list of LangChain messages into a PaLM API MessagePrompt structure."""
import google.generativeai as genai
context: str = ""
examples: List[genai.types.MessageDict] = []
messages: List[genai.types.MessageDict] = []
remaining = list(enumerate(input_messages))
while remaining:
index, input_message = remaining.pop(0)
if isinstance(input_message, SystemMessage):
if index != 0:
raise ChatGooglePalmError("System message must be first input message.") | https://python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
3939b24bcde2-2 | context = input_message.content
elif isinstance(input_message, HumanMessage) and input_message.example:
if messages:
raise ChatGooglePalmError(
"Message examples must come before other messages."
)
_, next_input_message = remaining.pop(0)
if isinstance(next_input_message, AIMessage) and next_input_message.example:
examples.extend(
[
genai.types.MessageDict(
author="human", content=input_message.content
),
genai.types.MessageDict(
author="ai", content=next_input_message.content
),
]
)
else:
raise ChatGooglePalmError(
"Human example message must be immediately followed by an "
" AI example response."
)
elif isinstance(input_message, AIMessage) and input_message.example:
raise ChatGooglePalmError(
"AI example message must be immediately preceded by a Human "
"example message."
)
elif isinstance(input_message, AIMessage):
messages.append(
genai.types.MessageDict(author="ai", content=input_message.content)
)
elif isinstance(input_message, HumanMessage):
messages.append(
genai.types.MessageDict(author="human", content=input_message.content)
)
elif isinstance(input_message, ChatMessage):
messages.append(
genai.types.MessageDict(
author=input_message.role, content=input_message.content
)
)
else:
raise ChatGooglePalmError(
"Messages without an explicit role not supported by PaLM API."
)
return genai.types.MessagePromptDict(
context=context,
examples=examples, | https://python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
3939b24bcde2-3 | messages=messages,
)
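For instance, a system message plus one example pair and a live question map to the PaLM prompt structure as sketched here:
.. code-block:: python

from langchain.schema import AIMessage, HumanMessage, SystemMessage

history = [
    SystemMessage(content="Answer briefly."),
    HumanMessage(content="2+2?", example=True),
    AIMessage(content="4", example=True),
    HumanMessage(content="3+5?"),
]
# _messages_to_prompt_dict(history) yields roughly:
# {"context": "Answer briefly.",
#  "examples": [{"author": "human", "content": "2+2?"},
#               {"author": "ai", "content": "4"}],
#  "messages": [{"author": "human", "content": "3+5?"}]}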
def _create_retry_decorator() -> Callable[[Any], Any]:
"""Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions"""
import google.api_core.exceptions
multiplier = 2
min_seconds = 1
max_seconds = 60
max_retries = 10
return retry(
reraise=True,
stop=stop_after_attempt(max_retries),
wait=wait_exponential(multiplier=multiplier, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(google.api_core.exceptions.ResourceExhausted)
| retry_if_exception_type(google.api_core.exceptions.ServiceUnavailable)
| retry_if_exception_type(google.api_core.exceptions.GoogleAPIError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def chat_with_retry(llm: ChatGooglePalm, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator()
@retry_decorator
def _chat_with_retry(**kwargs: Any) -> Any:
return llm.client.chat(**kwargs)
return _chat_with_retry(**kwargs)
async def achat_with_retry(llm: ChatGooglePalm, **kwargs: Any) -> Any:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator()
@retry_decorator
async def _achat_with_retry(**kwargs: Any) -> Any:
# Use OpenAI's async api https://github.com/openai/openai-python#async-api
return await llm.client.chat_async(**kwargs)
return await _achat_with_retry(**kwargs) | https://python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
3939b24bcde2-4 | [docs]class ChatGooglePalm(BaseChatModel, BaseModel):
"""Wrapper around Google's PaLM Chat API.
To use you must have the google.generativeai Python package installed and
either:
1. The ``GOOGLE_API_KEY`` environment variable set with your API key, or
2. Pass your API key using the google_api_key kwarg to the ChatGooglePalm
constructor.
Example:
.. code-block:: python
from langchain.chat_models import ChatGooglePalm
chat = ChatGooglePalm()
"""
client: Any #: :meta private:
model_name: str = "models/chat-bison-001"
"""Model name to use."""
google_api_key: Optional[str] = None
temperature: Optional[float] = None
"""Run inference with this temperature. Must by in the closed
interval [0.0, 1.0]."""
top_p: Optional[float] = None
"""Decode using nucleus sampling: consider the smallest set of tokens whose
probability sum is at least top_p. Must be in the closed interval [0.0, 1.0]."""
top_k: Optional[int] = None
"""Decode using top-k sampling: consider the set of top_k most probable tokens.
Must be positive."""
n: int = 1
"""Number of chat completions to generate for each prompt. Note that the API may
not return the full n completions if duplicates are generated."""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate api key, python package exists, temperature, top_p, and top_k.""" | https://python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
3939b24bcde2-5 | google_api_key = get_from_dict_or_env(
values, "google_api_key", "GOOGLE_API_KEY"
)
try:
import google.generativeai as genai
genai.configure(api_key=google_api_key)
except ImportError:
raise ChatGooglePalmError(
"Could not import google.generativeai python package. "
"Please install it with `pip install google-generativeai`"
)
values["client"] = genai
if values["temperature"] is not None and not 0 <= values["temperature"] <= 1:
raise ValueError("temperature must be in the range [0.0, 1.0]")
if values["top_p"] is not None and not 0 <= values["top_p"] <= 1:
raise ValueError("top_p must be in the range [0.0, 1.0]")
if values["top_k"] is not None and values["top_k"] <= 0:
raise ValueError("top_k must be positive")
return values
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> ChatResult:
prompt = _messages_to_prompt_dict(messages)
response: genai.types.ChatResponse = chat_with_retry(
self,
model=self.model_name,
prompt=prompt,
temperature=self.temperature,
top_p=self.top_p,
top_k=self.top_k,
candidate_count=self.n,
)
return _response_to_result(response, stop) | https://python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
3939b24bcde2-6 | async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
) -> ChatResult:
prompt = _messages_to_prompt_dict(messages)
response: genai.types.ChatResponse = await achat_with_retry(
self,
model=self.model_name,
prompt=prompt,
temperature=self.temperature,
top_p=self.top_p,
top_k=self.top_k,
candidate_count=self.n,
)
return _response_to_result(response, stop)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model_name": self.model_name,
"temperature": self.temperature,
"top_p": self.top_p,
"top_k": self.top_k,
"n": self.n,
}
@property
def _llm_type(self) -> str:
return "google-palm-chat"
By Harrison Chase
© Copyright 2023, Harrison Chase.
Last updated on Jun 02, 2023. | https://python.langchain.com/en/latest/_modules/langchain/chat_models/google_palm.html |
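A usage sketch, assuming ``GOOGLE_API_KEY`` is set; values outside the ranges validated above raise ``ValueError`` at construction:
.. code-block:: python

from langchain.chat_models import ChatGooglePalm
from langchain.schema import HumanMessage

chat = ChatGooglePalm(temperature=0.2, top_k=40)
print(chat([HumanMessage(content="Say hello in French.")]).content)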
222c31760ef2-0 | Source code for langchain.chat_models.promptlayer_openai
"""PromptLayer wrapper."""
import datetime
from typing import Any, List, Mapping, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models import ChatOpenAI
from langchain.schema import BaseMessage, ChatResult
[docs]class PromptLayerChatOpenAI(ChatOpenAI):
"""Wrapper around OpenAI Chat large language models and PromptLayer.
To use, you should have the ``openai`` and ``promptlayer`` python
package installed, and the environment variable ``OPENAI_API_KEY``
and ``PROMPTLAYER_API_KEY`` set with your openAI API key and
promptlayer key respectively.
All parameters that can be passed to the OpenAI LLM can also
be passed here. The PromptLayerChatOpenAI adds to optional
parameters:
``pl_tags``: List of strings to tag the request with.
``return_pl_id``: If True, the PromptLayer request ID will be
returned in the ``generation_info`` field of the
``Generation`` object.
Example:
.. code-block:: python
from langchain.chat_models import PromptLayerChatOpenAI
openai = PromptLayerChatOpenAI(model_name="gpt-3.5-turbo")
"""
pl_tags: Optional[List[str]]
return_pl_id: Optional[bool] = False
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> ChatResult: | https://python.langchain.com/en/latest/_modules/langchain/chat_models/promptlayer_openai.html |
222c31760ef2-1 | """Call ChatOpenAI generate and then call PromptLayer API to log the request."""
from promptlayer.utils import get_api_key, promptlayer_api_request
request_start_time = datetime.datetime.now().timestamp()
generated_responses = super()._generate(messages, stop, run_manager)
request_end_time = datetime.datetime.now().timestamp()
message_dicts, params = super()._create_message_dicts(messages, stop)
for i, generation in enumerate(generated_responses.generations):
response_dict, params = super()._create_message_dicts(
[generation.message], stop
)
pl_request_id = promptlayer_api_request(
"langchain.PromptLayerChatOpenAI",
"langchain",
message_dicts,
params,
self.pl_tags,
response_dict,
request_start_time,
request_end_time,
get_api_key(),
return_pl_id=self.return_pl_id,
)
if self.return_pl_id:
if generation.generation_info is None or not isinstance(
generation.generation_info, dict
):
generation.generation_info = {}
generation.generation_info["pl_request_id"] = pl_request_id
return generated_responses
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
) -> ChatResult:
"""Call ChatOpenAI agenerate and then call PromptLayer to log."""
from promptlayer.utils import get_api_key, promptlayer_api_request_async
request_start_time = datetime.datetime.now().timestamp()
generated_responses = await super()._agenerate(messages, stop, run_manager) | https://python.langchain.com/en/latest/_modules/langchain/chat_models/promptlayer_openai.html |
222c31760ef2-2 | request_end_time = datetime.datetime.now().timestamp()
message_dicts, params = super()._create_message_dicts(messages, stop)
for i, generation in enumerate(generated_responses.generations):
response_dict, params = super()._create_message_dicts(
[generation.message], stop
)
pl_request_id = await promptlayer_api_request_async(
"langchain.PromptLayerChatOpenAI.async",
"langchain",
message_dicts,
params,
self.pl_tags,
response_dict,
request_start_time,
request_end_time,
get_api_key(),
return_pl_id=self.return_pl_id,
)
if self.return_pl_id:
if generation.generation_info is None or not isinstance(
generation.generation_info, dict
):
generation.generation_info = {}
generation.generation_info["pl_request_id"] = pl_request_id
return generated_responses
@property
def _llm_type(self) -> str:
return "promptlayer-openai-chat"
@property
def _identifying_params(self) -> Mapping[str, Any]:
return {
**super()._identifying_params,
"pl_tags": self.pl_tags,
"return_pl_id": self.return_pl_id,
} | https://python.langchain.com/en/latest/_modules/langchain/chat_models/promptlayer_openai.html |
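A usage sketch, assuming both ``OPENAI_API_KEY`` and ``PROMPTLAYER_API_KEY`` are set:
.. code-block:: python

from langchain.chat_models import PromptLayerChatOpenAI
from langchain.schema import HumanMessage

chat = PromptLayerChatOpenAI(pl_tags=["demo"], return_pl_id=True)
result = chat.generate([[HumanMessage(content="ping")]])
# With return_pl_id=True, the PromptLayer request id is attached per generation:
pl_request_id = result.generations[0][0].generation_info["pl_request_id"]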
be6fd8362bb3-0 | Source code for langchain.chat_models.anthropic
from typing import Any, Dict, List, Optional
from pydantic import Extra
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.llms.anthropic import _AnthropicCommon
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
ChatMessage,
ChatResult,
HumanMessage,
SystemMessage,
)
[docs]class ChatAnthropic(BaseChatModel, _AnthropicCommon):
r"""Wrapper around Anthropic's large language model.
To use, you should have the ``anthropic`` python package installed, and the
environment variable ``ANTHROPIC_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.chat_models import ChatAnthropic
model = ChatAnthropic(model="<model_name>", anthropic_api_key="my-api-key")
"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "anthropic-chat"
def _convert_one_message_to_text(self, message: BaseMessage) -> str:
if isinstance(message, ChatMessage):
message_text = f"\n\n{message.role.capitalize()}: {message.content}"
elif isinstance(message, HumanMessage):
message_text = f"{self.HUMAN_PROMPT} {message.content}"
elif isinstance(message, AIMessage): | https://python.langchain.com/en/latest/_modules/langchain/chat_models/anthropic.html |
be6fd8362bb3-1 | message_text = f"{self.AI_PROMPT} {message.content}"
elif isinstance(message, SystemMessage):
message_text = f"{self.HUMAN_PROMPT} <admin>{message.content}</admin>"
else:
raise ValueError(f"Got unknown type {message}")
return message_text
def _convert_messages_to_text(self, messages: List[BaseMessage]) -> str:
"""Format a list of strings into a single string with necessary newlines.
Args:
messages (List[BaseMessage]): List of BaseMessage to combine.
Returns:
str: Combined string with necessary newlines.
"""
return "".join(
self._convert_one_message_to_text(message) for message in messages
)
def _convert_messages_to_prompt(self, messages: List[BaseMessage]) -> str:
"""Format a list of messages into a full prompt for the Anthropic model
Args:
messages (List[BaseMessage]): List of BaseMessage to combine.
Returns:
str: Combined string with necessary HUMAN_PROMPT and AI_PROMPT tags.
"""
if not self.AI_PROMPT:
raise NameError("Please ensure the anthropic package is loaded")
if not isinstance(messages[-1], AIMessage):
messages.append(AIMessage(content=""))
text = self._convert_messages_to_text(messages)
return (
text.rstrip()
) # trim off the trailing ' ' that might come from the "Assistant: "
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> ChatResult: | https://python.langchain.com/en/latest/_modules/langchain/chat_models/anthropic.html |
be6fd8362bb3-2 | prompt = self._convert_messages_to_prompt(messages)
params: Dict[str, Any] = {"prompt": prompt, **self._default_params}
if stop:
params["stop_sequences"] = stop
if self.streaming:
completion = ""
stream_resp = self.client.completion_stream(**params)
for data in stream_resp:
delta = data["completion"][len(completion) :]
completion = data["completion"]
if run_manager:
run_manager.on_llm_new_token(
delta,
)
else:
response = self.client.completion(**params)
completion = response["completion"]
message = AIMessage(content=completion)
return ChatResult(generations=[ChatGeneration(message=message)])
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
) -> ChatResult:
prompt = self._convert_messages_to_prompt(messages)
params: Dict[str, Any] = {"prompt": prompt, **self._default_params}
if stop:
params["stop_sequences"] = stop
if self.streaming:
completion = ""
stream_resp = await self.client.acompletion_stream(**params)
async for data in stream_resp:
delta = data["completion"][len(completion) :]
completion = data["completion"]
if run_manager:
await run_manager.on_llm_new_token(
delta,
)
else:
response = await self.client.acompletion(**params)
completion = response["completion"]
message = AIMessage(content=completion) | https://python.langchain.com/en/latest/_modules/langchain/chat_models/anthropic.html |
be6fd8362bb3-3 | return ChatResult(generations=[ChatGeneration(message=message)])
[docs] def get_num_tokens(self, text: str) -> int:
"""Calculate number of tokens."""
if not self.count_tokens:
raise NameError("Please ensure the anthropic package is loaded")
return self.count_tokens(text) | https://python.langchain.com/en/latest/_modules/langchain/chat_models/anthropic.html |
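A usage sketch, assuming ``ANTHROPIC_API_KEY`` is set and the model name is valid for your account:
.. code-block:: python

from langchain.chat_models import ChatAnthropic
from langchain.schema import HumanMessage, SystemMessage

chat = ChatAnthropic(model="claude-v1")
messages = [
    SystemMessage(content="Respond in rhyme."),  # rendered as an <admin> human turn above
    HumanMessage(content="Hello!"),
]
print(chat(messages).content)
print(chat.get_num_tokens("Hello!"))  # uses the anthropic tokenizer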
89d2778f58b3-0 | Source code for langchain.chat_models.vertexai
"""Wrapper around Google VertexAI chat-based models."""
from dataclasses import dataclass, field
from typing import Dict, List, Optional
from pydantic import root_validator
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.llms.vertexai import _VertexAICommon
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
ChatResult,
HumanMessage,
SystemMessage,
)
from langchain.utilities.vertexai import raise_vertex_import_error
@dataclass
class _MessagePair:
"""InputOutputTextPair represents a pair of input and output texts."""
question: HumanMessage
answer: AIMessage
@dataclass
class _ChatHistory:
"""InputOutputTextPair represents a pair of input and output texts."""
history: List[_MessagePair] = field(default_factory=list)
system_message: Optional[SystemMessage] = None
def _parse_chat_history(history: List[BaseMessage]) -> _ChatHistory:
"""Parse a sequence of messages into history.
A sequence should be either (SystemMessage, HumanMessage, AIMessage,
HumanMessage, AIMessage, ...) or (HumanMessage, AIMessage, HumanMessage,
AIMessage, ...).
Args:
history: The list of messages to re-create the history of the chat.
Returns:
A parsed chat history.
Raises:
ValueError: If the sequence of messages is odd, or a human message is not followed
by a message from AI (e.g., Human, Human, AI or AI, AI, Human).
"""
if not history: | https://python.langchain.com/en/latest/_modules/langchain/chat_models/vertexai.html |
89d2778f58b3-1 | return _ChatHistory()
first_message = history[0]
system_message = first_message if isinstance(first_message, SystemMessage) else None
chat_history = _ChatHistory(system_message=system_message)
messages_left = history[1:] if system_message else history
if len(messages_left) % 2 != 0:
raise ValueError(
f"Amount of messages in history should be even, got {len(messages_left)}!"
)
for question, answer in zip(messages_left[::2], messages_left[1::2]):
if not isinstance(question, HumanMessage) or not isinstance(answer, AIMessage):
raise ValueError(
"A human message should follow a bot one, "
f"got {question.type}, {answer.type}."
)
chat_history.history.append(_MessagePair(question=question, answer=answer))
return chat_history
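A sketch of the history shape this parser accepts:
.. code-block:: python

from langchain.schema import AIMessage, HumanMessage, SystemMessage

history = [
    SystemMessage(content="You are concise."),
    HumanMessage(content="Hi"),
    AIMessage(content="Hello!"),
]
parsed = _parse_chat_history(history)
# parsed.system_message -> SystemMessage(content="You are concise.")
# parsed.history        -> [_MessagePair(question=HumanMessage(...), answer=AIMessage(...))]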
[docs]class ChatVertexAI(_VertexAICommon, BaseChatModel):
"""Wrapper around Vertex AI large language models."""
model_name: str = "chat-bison"
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
cls._try_init_vertexai(values)
try:
from vertexai.preview.language_models import ChatModel
except ImportError:
raise_vertex_import_error()
values["client"] = ChatModel.from_pretrained(values["model_name"])
return values
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> ChatResult: | https://python.langchain.com/en/latest/_modules/langchain/chat_models/vertexai.html |
89d2778f58b3-2 | """Generate next turn in the conversation.
Args:
messages: The history of the conversation as a list of messages.
stop: The list of stop words (optional).
run_manager: The Callbackmanager for LLM run, it's not used at the moment.
Returns:
The ChatResult that contains outputs generated by the model.
Raises:
ValueError: if the last message in the list is not from human.
"""
if not messages:
raise ValueError(
"You should provide at least one message to start the chat!"
)
question = messages[-1]
if not isinstance(question, HumanMessage):
raise ValueError(
f"Last message in the list should be from human, got {question.type}."
)
history = _parse_chat_history(messages[:-1])
context = history.system_message.content if history.system_message else None
chat = self.client.start_chat(context=context, **self._default_params)
for pair in history.history:
chat._history.append((pair.question.content, pair.answer.content))
response = chat.send_message(question.content)
text = self._enforce_stop_words(response.text, stop)
return ChatResult(generations=[ChatGeneration(message=AIMessage(content=text))])
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
) -> ChatResult:
raise NotImplementedError(
"""Vertex AI doesn't support async requests at the moment."""
) | https://python.langchain.com/en/latest/_modules/langchain/chat_models/vertexai.html |
9e02458598e1-0 | Source code for langchain.retrievers.pinecone_hybrid_search
"""Taken from: https://docs.pinecone.io/docs/hybrid-search"""
import hashlib
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document
def hash_text(text: str) -> str:
return str(hashlib.sha256(text.encode("utf-8")).hexdigest())
def create_index(
contexts: List[str],
index: Any,
embeddings: Embeddings,
sparse_encoder: Any,
ids: Optional[List[str]] = None,
metadatas: Optional[List[dict]] = None,
) -> None:
batch_size = 32
_iterator = range(0, len(contexts), batch_size)
try:
from tqdm.auto import tqdm
_iterator = tqdm(_iterator)
except ImportError:
pass
if ids is None:
# create unique ids using hash of the text
ids = [hash_text(context) for context in contexts]
for i in _iterator:
# find end of batch
i_end = min(i + batch_size, len(contexts))
# extract batch
context_batch = contexts[i:i_end]
batch_ids = ids[i:i_end]
metadata_batch = (
metadatas[i:i_end] if metadatas else [{} for _ in context_batch]
)
# add context passages as metadata
meta = [
{"context": context, **metadata}
for context, metadata in zip(context_batch, metadata_batch)
]
# create dense vectors
dense_embeds = embeddings.embed_documents(context_batch) | https://python.langchain.com/en/latest/_modules/langchain/retrievers/pinecone_hybrid_search.html |
9e02458598e1-1 | # create sparse vectors
sparse_embeds = sparse_encoder.encode_documents(context_batch)
for s in sparse_embeds:
s["values"] = [float(s1) for s1 in s["values"]]
vectors = []
# loop through the data and create dictionaries for upserts
for doc_id, sparse, dense, metadata in zip(
batch_ids, sparse_embeds, dense_embeds, meta
):
vectors.append(
{
"id": doc_id,
"sparse_values": sparse,
"values": dense,
"metadata": metadata,
}
)
# upload the documents to the new hybrid index
index.upsert(vectors)
[docs]class PineconeHybridSearchRetriever(BaseRetriever, BaseModel):
embeddings: Embeddings
sparse_encoder: Any
index: Any
top_k: int = 4
alpha: float = 0.5
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
[docs] def add_texts(
self,
texts: List[str],
ids: Optional[List[str]] = None,
metadatas: Optional[List[dict]] = None,
) -> None:
create_index(
texts,
self.index,
self.embeddings,
self.sparse_encoder,
ids=ids,
metadatas=metadatas,
)
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
try: | https://python.langchain.com/en/latest/_modules/langchain/retrievers/pinecone_hybrid_search.html |
9e02458598e1-2 | from pinecone_text.hybrid import hybrid_convex_scale # noqa:F401
from pinecone_text.sparse.base_sparse_encoder import (
BaseSparseEncoder, # noqa:F401
)
except ImportError:
raise ValueError(
"Could not import pinecone_text python package. "
"Please install it with `pip install pinecone_text`."
)
return values
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
from pinecone_text.hybrid import hybrid_convex_scale
sparse_vec = self.sparse_encoder.encode_queries(query)
# convert the question into a dense vector
dense_vec = self.embeddings.embed_query(query)
# scale alpha with hybrid_scale
dense_vec, sparse_vec = hybrid_convex_scale(dense_vec, sparse_vec, self.alpha)
sparse_vec["values"] = [float(s1) for s1 in sparse_vec["values"]]
# query pinecone with the query parameters
result = self.index.query(
vector=dense_vec,
sparse_vector=sparse_vec,
top_k=self.top_k,
include_metadata=True,
)
final_result = []
for res in result["matches"]:
context = res["metadata"].pop("context")
final_result.append(
Document(page_content=context, metadata=res["metadata"])
)
# return search results as Document objects
return final_result
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError | https://python.langchain.com/en/latest/_modules/langchain/retrievers/pinecone_hybrid_search.html |
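A usage sketch; it assumes an existing Pinecone index created with the dotproduct metric and uses ``pinecone_text``'s BM25 encoder as the sparse encoder (credentials and index name below are placeholders):
.. code-block:: python

import pinecone
from pinecone_text.sparse import BM25Encoder
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import PineconeHybridSearchRetriever

pinecone.init(api_key="...", environment="...")  # placeholder credentials
index = pinecone.Index("hybrid-demo")  # hypothetical index, dotproduct metric
retriever = PineconeHybridSearchRetriever(
    embeddings=OpenAIEmbeddings(),
    sparse_encoder=BM25Encoder().default(),
    index=index,
)
retriever.add_texts(["foo", "bar"])
docs = retriever.get_relevant_documents("foo")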
40bfeb179a7f-0 | Source code for langchain.retrievers.azure_cognitive_search
"""Retriever wrapper for Azure Cognitive Search."""
from __future__ import annotations
import json
from typing import Dict, List, Optional
import aiohttp
import requests
from pydantic import BaseModel, Extra, root_validator
from langchain.schema import BaseRetriever, Document
from langchain.utils import get_from_dict_or_env
[docs]class AzureCognitiveSearchRetriever(BaseRetriever, BaseModel):
"""Wrapper around Azure Cognitive Search."""
service_name: str = ""
"""Name of Azure Cognitive Search service"""
index_name: str = ""
"""Name of Index inside Azure Cognitive Search service"""
api_key: str = ""
"""API Key. Both Admin and Query keys work, but for reading data it's
recommended to use a Query key."""
api_version: str = "2020-06-30"
"""API version"""
aiosession: Optional[aiohttp.ClientSession] = None
"""ClientSession, in case we want to reuse connection for better performance."""
content_key: str = "content"
"""Key in a retrieved result to set as the Document page_content."""
class Config:
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that service name, index name and api key exists in environment."""
values["service_name"] = get_from_dict_or_env(
values, "service_name", "AZURE_COGNITIVE_SEARCH_SERVICE_NAME"
)
values["index_name"] = get_from_dict_or_env(
values, "index_name", "AZURE_COGNITIVE_SEARCH_INDEX_NAME"
) | https://python.langchain.com/en/latest/_modules/langchain/retrievers/azure_cognitive_search.html |
40bfeb179a7f-1 | values["api_key"] = get_from_dict_or_env(
values, "api_key", "AZURE_COGNITIVE_SEARCH_API_KEY"
)
return values
def _build_search_url(self, query: str) -> str:
base_url = f"https://{self.service_name}.search.windows.net/"
endpoint_path = f"indexes/{self.index_name}/docs?api-version={self.api_version}"
return base_url + endpoint_path + f"&search={query}"
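# Illustrative only: the assembled URL has the shape
# https://<service>.search.windows.net/indexes/<index>/docs?api-version=2020-06-30&search=<query>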
@property
def _headers(self) -> Dict[str, str]:
return {
"Content-Type": "application/json",
"api-key": self.api_key,
}
def _search(self, query: str) -> List[dict]:
search_url = self._build_search_url(query)
response = requests.get(search_url, headers=self._headers)
if response.status_code != 200:
raise Exception(f"Error in search request: {response}")
return json.loads(response.text)["value"]
async def _asearch(self, query: str) -> List[dict]:
search_url = self._build_search_url(query)
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.get(search_url, headers=self._headers) as response:
response_json = await response.json()
else:
async with self.aiosession.get(
search_url, headers=self._headers
) as response:
response_json = await response.json()
return response_json["value"]
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
search_results = self._search(query)
return [
Document(page_content=result.pop(self.content_key), metadata=result)
for result in search_results
]
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
search_results = await self._asearch(query)
return [
Document(page_content=result.pop(self.content_key), metadata=result)
for result in search_results
]
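A minimal usage sketch (the service, index, and key values are placeholders; they can also be passed directly as constructor fields instead of environment variables):
.. code-block:: python

    import os
    from langchain.retrievers import AzureCognitiveSearchRetriever

    os.environ["AZURE_COGNITIVE_SEARCH_SERVICE_NAME"] = "<your-service-name>"
    os.environ["AZURE_COGNITIVE_SEARCH_INDEX_NAME"] = "<your-index-name>"
    os.environ["AZURE_COGNITIVE_SEARCH_API_KEY"] = "<your-query-key>"

    retriever = AzureCognitiveSearchRetriever(content_key="content")
    docs = retriever.get_relevant_documents("what is langchain?")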
Source code for langchain.retrievers.vespa_retriever
"""Wrapper for retrieving documents from Vespa."""
from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Sequence, Union
from langchain.schema import BaseRetriever, Document
if TYPE_CHECKING:
from vespa.application import Vespa
[docs]class VespaRetriever(BaseRetriever):
def __init__(
self,
app: Vespa,
body: Dict,
content_field: str,
metadata_fields: Optional[Sequence[str]] = None,
):
self._application = app
self._query_body = body
self._content_field = content_field
self._metadata_fields = metadata_fields or ()
def _query(self, body: Dict) -> List[Document]:
response = self._application.query(body)
if not str(response.status_code).startswith("2"):
raise RuntimeError(
"Could not retrieve data from Vespa. Error code: {}".format(
response.status_code
)
)
root = response.json["root"]
if "errors" in root:
raise RuntimeError(json.dumps(root["errors"]))
docs = []
for child in response.hits:
page_content = child["fields"].pop(self._content_field, "")
if self._metadata_fields == "*":
metadata = child["fields"]
else:
metadata = {mf: child["fields"].get(mf) for mf in self._metadata_fields}
metadata["id"] = child["id"]
docs.append(Document(page_content=page_content, metadata=metadata))
return docs
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
body = self._query_body.copy()
body["query"] = query
return self._query(body)
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
[docs] def get_relevant_documents_with_filter(
self, query: str, *, _filter: Optional[str] = None
) -> List[Document]:
body = self._query_body.copy()
_filter = f" and {_filter}" if _filter else ""
body["yql"] = body["yql"] + _filter
body["query"] = query
return self._query(body)
[docs] @classmethod
def from_params(
cls,
url: str,
content_field: str,
*,
k: Optional[int] = None,
metadata_fields: Union[Sequence[str], Literal["*"]] = (),
sources: Union[Sequence[str], Literal["*"], None] = None,
_filter: Optional[str] = None,
yql: Optional[str] = None,
**kwargs: Any,
) -> VespaRetriever:
"""Instantiate retriever from params.
Args:
url (str): Vespa app URL.
content_field (str): Field in results to return as Document page_content.
k (Optional[int]): Number of Documents to return. Defaults to None.
metadata_fields(Sequence[str] or "*"): Fields in results to include in
document metadata. Defaults to empty tuple ().
sources (Sequence[str] or "*" or None): Sources to retrieve
from. Defaults to None.
_filter (Optional[str]): Document filter condition expressed in YQL.
Defaults to None.
yql (Optional[str]): Full YQL query to be used. Should not be specified
if _filter or sources are specified. Defaults to None.
kwargs (Any): Keyword arguments added to query body.
"""
try:
from vespa.application import Vespa
except ImportError:
raise ImportError(
"pyvespa is not installed, please install with `pip install pyvespa`"
)
app = Vespa(url)
body = kwargs.copy()
if yql and (sources or _filter):
raise ValueError(
"yql should only be specified if both sources and _filter are not "
"specified."
)
else:
if metadata_fields == "*":
_fields = "*"
body["summary"] = "short"
else:
_fields = ", ".join([content_field] + list(metadata_fields or []))
_sources = ", ".join(sources) if isinstance(sources, Sequence) else "*"
_filter = f" and {_filter}" if _filter else ""
yql = f"select {_fields} from sources {_sources} where userQuery(){_filter}"
body["yql"] = yql
if k:
body["hits"] = k
return cls(app, body, content_field, metadata_fields=metadata_fields)
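A minimal usage sketch via from_params (the endpoint URL and field names are illustrative, not part of this module):
.. code-block:: python

    from langchain.retrievers import VespaRetriever

    retriever = VespaRetriever.from_params(
        "https://<your-vespa-app>",  # hypothetical endpoint
        "content",                   # field returned as page_content
        k=4,
        metadata_fields=("path", "title"),
    )
    docs = retriever.get_relevant_documents("what is vespa?")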
Source code for langchain.retrievers.svm
"""SMV Retriever.
Largely based on
https://github.com/karpathy/randomfun/blob/master/knn_vs_svm.ipynb"""
from __future__ import annotations
import concurrent.futures
from typing import Any, List, Optional
import numpy as np
from pydantic import BaseModel
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever, Document
def create_index(contexts: List[str], embeddings: Embeddings) -> np.ndarray:
with concurrent.futures.ThreadPoolExecutor() as executor:
return np.array(list(executor.map(embeddings.embed_query, contexts)))
[docs]class SVMRetriever(BaseRetriever, BaseModel):
embeddings: Embeddings
index: Any
texts: List[str]
k: int = 4
relevancy_threshold: Optional[float] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
[docs] @classmethod
def from_texts(
cls, texts: List[str], embeddings: Embeddings, **kwargs: Any
) -> SVMRetriever:
index = create_index(texts, embeddings)
return cls(embeddings=embeddings, index=index, texts=texts, **kwargs)
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
from sklearn import svm
query_embeds = np.array(self.embeddings.embed_query(query))
x = np.concatenate([query_embeds[None, ...], self.index])
y = np.zeros(x.shape[0])
y[0] = 1
clf = svm.LinearSVC(
class_weight="balanced", verbose=False, max_iter=10000, tol=1e-6, C=0.1
)
clf.fit(x, y)
similarities = clf.decision_function(x)
sorted_ix = np.argsort(-similarities)
# svm.LinearSVC in scikit-learn is non-deterministic.
# if a text is the same as a query, there is no guarantee
# the query will be in the first index.
# this performs a simple swap, this works because anything
# left of the 0 should be equivalent.
zero_index = np.where(sorted_ix == 0)[0][0]
if zero_index != 0:
sorted_ix[0], sorted_ix[zero_index] = sorted_ix[zero_index], sorted_ix[0]
denominator = np.max(similarities) - np.min(similarities) + 1e-6
normalized_similarities = (similarities - np.min(similarities)) / denominator
top_k_results = []
for row in sorted_ix[1 : self.k + 1]:
if (
self.relevancy_threshold is None
or normalized_similarities[row] >= self.relevancy_threshold
):
top_k_results.append(Document(page_content=self.texts[row - 1]))
return top_k_results
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
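A minimal usage sketch (the embedding model is an assumption; any Embeddings implementation works):
.. code-block:: python

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.retrievers import SVMRetriever

    retriever = SVMRetriever.from_texts(
        ["foo", "bar", "world"], OpenAIEmbeddings(), k=2
    )
    docs = retriever.get_relevant_documents("foo")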
Source code for langchain.retrievers.tfidf
"""TF-IDF Retriever.
Largely based on
https://github.com/asvskartheek/Text-Retrieval/blob/master/TF-IDF%20Search%20Engine%20(SKLEARN).ipynb"""
from __future__ import annotations
from typing import Any, Dict, Iterable, List, Optional
from pydantic import BaseModel
from langchain.schema import BaseRetriever, Document
[docs]class TFIDFRetriever(BaseRetriever, BaseModel):
vectorizer: Any
docs: List[Document]
tfidf_array: Any
k: int = 4
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
[docs] @classmethod
def from_texts(
cls,
texts: Iterable[str],
metadatas: Optional[Iterable[dict]] = None,
tfidf_params: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> TFIDFRetriever:
try:
from sklearn.feature_extraction.text import TfidfVectorizer
except ImportError:
raise ImportError(
"Could not import scikit-learn, please install with `pip install "
"scikit-learn`."
)
tfidf_params = tfidf_params or {}
vectorizer = TfidfVectorizer(**tfidf_params)
tfidf_array = vectorizer.fit_transform(texts)
metadatas = metadatas or ({} for _ in texts)
docs = [Document(page_content=t, metadata=m) for t, m in zip(texts, metadatas)]
return cls(vectorizer=vectorizer, docs=docs, tfidf_array=tfidf_array, **kwargs)
[docs] @classmethod
def from_documents(
cls,
documents: Iterable[Document],
*,
tfidf_params: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> TFIDFRetriever:
texts, metadatas = zip(*((d.page_content, d.metadata) for d in documents))
return cls.from_texts(
texts=texts, tfidf_params=tfidf_params, metadatas=metadatas, **kwargs
)
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
from sklearn.metrics.pairwise import cosine_similarity
query_vec = self.vectorizer.transform(
[query]
)  # input: the query string; output shape: (1, n_features)
results = cosine_similarity(self.tfidf_array, query_vec).reshape(
(-1,)
)  # cosine similarity of the query with each doc; shape: (n_docs,)
return_docs = [self.docs[i] for i in results.argsort()[-self.k :][::-1]]
return return_docs
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
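A minimal usage sketch; tfidf_params is forwarded verbatim to scikit-learn's TfidfVectorizer:
.. code-block:: python

    from langchain.retrievers import TFIDFRetriever

    retriever = TFIDFRetriever.from_texts(
        ["foo", "bar", "world"],
        tfidf_params={"ngram_range": (1, 2)},  # any TfidfVectorizer kwarg
        k=2,
    )
    docs = retriever.get_relevant_documents("foo")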
Source code for langchain.retrievers.metal
from typing import Any, List, Optional
from langchain.schema import BaseRetriever, Document
[docs]class MetalRetriever(BaseRetriever):
def __init__(self, client: Any, params: Optional[dict] = None):
from metal_sdk.metal import Metal
if not isinstance(client, Metal):
raise ValueError(
"Got unexpected client, should be of type metal_sdk.metal.Metal. "
f"Instead, got {type(client)}"
)
self.client: Metal = client
self.params = params or {}
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
results = self.client.search({"text": query}, **self.params)
final_results = []
for r in results["data"]:
metadata = {k: v for k, v in r.items() if k != "text"}
final_results.append(Document(page_content=r["text"], metadata=metadata))
return final_results
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError
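A minimal usage sketch (the key and index identifiers are placeholders, and the Metal constructor arguments are an assumption based on the metal_sdk client; params is forwarded to the client's search call):
.. code-block:: python

    from metal_sdk.metal import Metal
    from langchain.retrievers import MetalRetriever

    metal = Metal("<METAL_API_KEY>", "<CLIENT_ID>", "<INDEX_ID>")
    retriever = MetalRetriever(metal, params={"limit": 2})
    docs = retriever.get_relevant_documents("what is machine learning?")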
Source code for langchain.retrievers.databerry
from typing import List, Optional
import aiohttp
import requests
from langchain.schema import BaseRetriever, Document
[docs]class DataberryRetriever(BaseRetriever):
datastore_url: str
top_k: Optional[int]
api_key: Optional[str]
def __init__(
self,
datastore_url: str,
top_k: Optional[int] = None,
api_key: Optional[str] = None,
):
self.datastore_url = datastore_url
self.api_key = api_key
self.top_k = top_k
[docs] def get_relevant_documents(self, query: str) -> List[Document]:
response = requests.post(
self.datastore_url,
json={
"query": query,
**({"topK": self.top_k} if self.top_k is not None else {}),
},
headers={
"Content-Type": "application/json",
**(
{"Authorization": f"Bearer {self.api_key}"}
if self.api_key is not None
else {}
),
},
)
data = response.json()
return [
Document(
page_content=r["text"],
metadata={"source": r["source"], "score": r["score"]},
)
for r in data["results"]
]
[docs] async def aget_relevant_documents(self, query: str) -> List[Document]:
async with aiohttp.ClientSession() as session:
async with session.request(
"POST",
self.datastore_url,
json={
"query": query, | https://python.langchain.com/en/latest/_modules/langchain/retrievers/databerry.html |
f2756372fc04-1 | self.datastore_url,
json={
"query": query,
**({"topK": self.top_k} if self.top_k is not None else {}),
},
headers={
"Content-Type": "application/json",
**(
{"Authorization": f"Bearer {self.api_key}"}
if self.api_key is not None
else {}
),
},
) as response:
data = await response.json()
return [
Document(
page_content=r["text"],
metadata={"source": r["source"], "score": r["score"]},
)
for r in data["results"]
]
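A minimal usage sketch (the datastore URL is a placeholder; api_key is only needed for private datastores):
.. code-block:: python

    from langchain.retrievers import DataberryRetriever

    retriever = DataberryRetriever(
        datastore_url="https://<your-datastore>.databerry.ai/query",  # placeholder
        top_k=3,
    )
    docs = retriever.get_relevant_documents("what is databerry?")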