Any parameters that are valid to be passed to the call can be passed in, even if not explicitly saved on this class.
Example
from langchain import PipelineAI
pipeline = PipelineAI(pipeline_key="")
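A slightly fuller sketch, assuming the PIPELINE_API_KEY environment variable is set; the pipeline key and the pipeline_kwargs names below are placeholders, not a real pipeline:
from langchain import PipelineAI
pipeline = PipelineAI(
    pipeline_key="YOUR_PIPELINE_KEY",
    pipeline_kwargs={"max_length": 100},
)
response = pipeline("Tell me a joke.")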
Validators
build_extra » all fields
raise_deprecation » all fields
set_verbose » verbose
validate_environment » all fields
field pipeline_key: str = ''#
The id or tag of the target pipeline
field pipeline_kwargs: Dict[str, Any] [Optional]#
Holds any pipeline parameters valid for create call not
explicitly specified.
field verbose: bool [Optional]#
Whether to print out response text.
__call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str#
Check Cache and run the LLM on the given prompt and input.
async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
async apredict(text: str, *, stop: Optional[Sequence[str]] = None) → str#
Predict text from text.
async apredict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage#
Predict message from messages.
classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model#
Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data.
Default values are respected, but no other validation is performed.
Behaves as if Config.extra = ‘allow’ was set since it adds all passed values
copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model#
Duplicate a model, optionally choose which fields to include, exclude and change.
Parameters
include – fields to include in new model
exclude – fields to exclude from new model, as with values this takes precedence over include
update – values to change/add in the new model. Note: the data is not validated before creating
the new model: you should trust this data
deep – set to True to make a deep copy of the model
Returns
new model instance
dict(**kwargs: Any) → Dict#
Return a dictionary of the LLM.
generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
get_num_tokens(text: str) → int#
Get the number of tokens present in the text.
get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int#
Get the number of tokens in the message.
get_token_ids(text: str) → List[int]#
Get the token IDs present in the text.
json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode#
Generate a JSON representation of the model, include and exclude arguments as per dict().
encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().
predict(text: str, *, stop: Optional[Sequence[str]] = None) → str#
Predict text from text.
predict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage#
Predict message from messages.
save(file_path: Union[pathlib.Path, str]) → None#
Save the LLM.
Parameters
file_path – Path to file to save the LLM to.
Example:
.. code-block:: python
llm.save(file_path="path/llm.yaml")
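To restore a saved LLM from disk, the loader in langchain.llms.loading can be used (a minimal sketch, assuming the file written above; not every wrapper is registered for loading):
from langchain.llms.loading import load_llm
llm = load_llm("path/llm.yaml")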
classmethod update_forward_refs(**localns: Any) → None#
Try to update ForwardRefs on fields based on this Model, globalns and localns.
pydantic model langchain.llms.PredictionGuard[source]#
Wrapper around Prediction Guard large language models.
To use, you should have the predictionguard python package installed, and the
environment variable PREDICTIONGUARD_TOKEN set with your access token, or pass
it as a named parameter to the constructor. To use Prediction Guard’s API along
with OpenAI models, set the environment variable OPENAI_API_KEY with your
OpenAI API key as well.
Example
pgllm = PredictionGuard(model="MPT-7B-Instruct",
token="my-access-token",
output={
"type": "boolean"
})
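A sketch of calling the wrapper once constructed (the prompt is illustrative; the access token can also be supplied via the PREDICTIONGUARD_TOKEN environment variable instead of the token parameter):
response = pgllm("Is the following sentence positive? 'I love this product.'")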
Validators
raise_deprecation » all fields
set_verbose » verbose
validate_environment » all fields
field max_tokens: int = 256#
Denotes the number of tokens to predict per generation.
field model: Optional[str] = 'MPT-7B-Instruct'#
Model name to use.
field output: Optional[Dict[str, Any]] = None#
The output type or structure for controlling the LLM output.
field temperature: float = 0.75#
A non-negative float that tunes the degree of randomness in generation.
field token: Optional[str] = None#
Your Prediction Guard access token.
field verbose: bool [Optional]#
Whether to print out response text.
__call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str#
Check Cache and run the LLM on the given prompt and input.
async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
async apredict(text: str, *, stop: Optional[Sequence[str]] = None) → str#
Predict text from text.
async apredict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage#
Predict message from messages.
classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model#
Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data.
Default values are respected, but no other validation is performed.
Behaves as if Config.extra = ‘allow’ was set since it adds all passed values
copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model#
Duplicate a model, optionally choose which fields to include, exclude and change.
Parameters
include – fields to include in new model
exclude – fields to exclude from new model, as with values this takes precedence over include
update – values to change/add in the new model. Note: the data is not validated before creating
the new model: you should trust this data
deep – set to True to make a deep copy of the model
Returns
new model instance
dict(**kwargs: Any) → Dict#
Return a dictionary of the LLM.
generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
get_num_tokens(text: str) → int#
Get the number of tokens present in the text.
get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int#
Get the number of tokens in the message.
get_token_ids(text: str) → List[int]#
Get the token IDs present in the text.
json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode#
Generate a JSON representation of the model, include and exclude arguments as per dict().
encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().
predict(text: str, *, stop: Optional[Sequence[str]] = None) → str#
Predict text from text.
predict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage#
Predict message from messages.
save(file_path: Union[pathlib.Path, str]) → None#
Save the LLM.
Parameters
file_path – Path to file to save the LLM to.
Example:
.. code-block:: python
llm.save(file_path="path/llm.yaml")
classmethod update_forward_refs(**localns: Any) → None#
Try to update ForwardRefs on fields based on this Model, globalns and localns.
pydantic model langchain.llms.PromptLayerOpenAI[source]#
Wrapper around OpenAI large language models.
To use, you should have the openai and promptlayer python
packages installed, and the environment variables OPENAI_API_KEY
and PROMPTLAYER_API_KEY set with your OpenAI API key and
PromptLayer API key, respectively.
All parameters that can be passed to the OpenAI LLM can also
be passed here. The PromptLayerOpenAI LLM adds two optional
Parameters
pl_tags – List of strings to tag the request with.
return_pl_id – If True, the PromptLayer request ID will be
returned in the generation_info field of the
Generation object.
Example
from langchain.llms import PromptLayerOpenAI
openai = PromptLayerOpenAI(model_name="text-davinci-003")
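A sketch of the two PromptLayer-specific parameters in use (assumes OPENAI_API_KEY and PROMPTLAYER_API_KEY are set; the tag name is illustrative, and the pl_request_id key is an assumption based on the description above):
from langchain.llms import PromptLayerOpenAI
openai = PromptLayerOpenAI(
    model_name="text-davinci-003",
    pl_tags=["langchain-docs"],
    return_pl_id=True,
)
result = openai.generate(["Tell me a joke."])
generation = result.generations[0][0]
# With return_pl_id=True the request ID is attached to generation_info
pl_request_id = (generation.generation_info or {}).get("pl_request_id")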
Validators
build_extra » all fields
raise_deprecation » all fields
set_verbose » verbose
validate_environment » all fields
__call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str#
Check Cache and run the LLM on the given prompt and input.
async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
async apredict(text: str, *, stop: Optional[Sequence[str]] = None) → str#
Predict text from text.
async apredict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage#
Predict message from messages.
classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model#
Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data.
Default values are respected, but no other validation is performed.
Behaves as if Config.extra = ‘allow’ was set since it adds all passed values
copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model#
Duplicate a model, optionally choose which fields to include, exclude and change.
Parameters
include – fields to include in new model
exclude – fields to exclude from new model, as with values this takes precedence over include
update – values to change/add in the new model. Note: the data is not validated before creating
the new model: you should trust this data
deep – set to True to make a deep copy of the model
Returns
new model instance
create_llm_result(choices: Any, prompts: List[str], token_usage: Dict[str, int]) → langchain.schema.LLMResult#
Create the LLMResult from the choices and prompts.
dict(**kwargs: Any) → Dict#
Return a dictionary of the LLM.
generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
get_num_tokens(text: str) → int#
Get the number of tokens present in the text.
get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int#
Get the number of tokens in the message.
get_sub_prompts(params: Dict[str, Any], prompts: List[str], stop: Optional[List[str]] = None) → List[List[str]]#
Get the sub prompts for llm call.
get_token_ids(text: str) → List[int]#
Get the token IDs using the tiktoken package.
json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode#
Generate a JSON representation of the model, include and exclude arguments as per dict().
encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().
max_tokens_for_prompt(prompt: str) → int#
Calculate the maximum number of tokens possible to generate for a prompt.
Parameters
prompt – The prompt to pass into the model.
Returns
The maximum number of tokens to generate for a prompt.
Example
max_tokens = openai.max_tokens_for_prompt("Tell me a joke.")
modelname_to_contextsize(modelname: str) → int#
Calculate the maximum number of tokens possible to generate for a model.
Parameters
modelname – The modelname we want to know the context size for.
Returns
The maximum context size
Example
max_tokens = openai.modelname_to_contextsize("text-davinci-003")
predict(text: str, *, stop: Optional[Sequence[str]] = None) → str#
Predict text from text.
predict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage#
Predict message from messages.
prep_streaming_params(stop: Optional[List[str]] = None) → Dict[str, Any]#
Prepare the params for streaming.
save(file_path: Union[pathlib.Path, str]) → None#
Save the LLM.
Parameters
file_path – Path to file to save the LLM to.
Example:
.. code-block:: python
llm.save(file_path="path/llm.yaml")
stream(prompt: str, stop: Optional[List[str]] = None) → Generator#
Call OpenAI with streaming flag and return the resulting generator.
BETA: this is a beta feature while we figure out the right abstraction.
Once that happens, this interface could change.
Parameters
prompt – The prompts to pass into the model.
stop – Optional list of stop words to use when generating.
Returns
A generator representing the stream of tokens from OpenAI.
Example
generator = openai.stream("Tell me a joke.")
for token in generator:
yield token
classmethod update_forward_refs(**localns: Any) → None#
Try to update ForwardRefs on fields based on this Model, globalns and localns.
pydantic model langchain.llms.PromptLayerOpenAIChat[source]#
Wrapper around OpenAI large language models.
To use, you should have the openai and promptlayer python
packages installed, and the environment variables OPENAI_API_KEY
and PROMPTLAYER_API_KEY set with your OpenAI API key and
PromptLayer API key, respectively.
All parameters that can be passed to the OpenAIChat LLM can also
be passed here. The PromptLayerOpenAIChat adds two optional
Parameters
pl_tags – List of strings to tag the request with.
return_pl_id – If True, the PromptLayer request ID will be
returned in the generation_info field of the
Generation object.
Example
from langchain.llms import PromptLayerOpenAIChat
openaichat = PromptLayerOpenAIChat(model_name="gpt-3.5-turbo")
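A sketch combining the chat-specific prefix_messages field with the PromptLayer tags (the system prompt and tag are illustrative; prefix_messages is assumed to take OpenAI-style role/content dicts):
from langchain.llms import PromptLayerOpenAIChat
openaichat = PromptLayerOpenAIChat(
    model_name="gpt-3.5-turbo",
    prefix_messages=[{"role": "system", "content": "You are a concise assistant."}],
    pl_tags=["chat-example"],
)
response = openaichat("What is the capital of France?")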
Validators
build_extra » all fields
raise_deprecation » all fields
set_verbose » verbose
validate_environment » all fields
field allowed_special: Union[Literal['all'], AbstractSet[str]] = {}#
Set of special tokens that are allowed.
field disallowed_special: Union[Literal['all'], Collection[str]] = 'all'#
Set of special tokens that are not allowed.
field max_retries: int = 6#
Maximum number of retries to make when generating.
field model_kwargs: Dict[str, Any] [Optional]#
Holds any model parameters valid for create call not explicitly specified.
field model_name: str = 'gpt-3.5-turbo'#
Model name to use.
field prefix_messages: List [Optional]#
Series of messages for Chat input.
field streaming: bool = False#
Whether to stream the results or not.
__call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str#
Check Cache and run the LLM on the given prompt and input.
async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
async apredict(text: str, *, stop: Optional[Sequence[str]] = None) → str#
Predict text from text.
async apredict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage#
Predict message from messages.
classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model#
Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data.
Default values are respected, but no other validation is performed.
Behaves as if Config.extra = ‘allow’ was set since it adds all passed values
copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model#
Duplicate a model, optionally choose which fields to include, exclude and change.
Parameters
include – fields to include in new model
exclude – fields to exclude from new model, as with values this takes precedence over include
update – values to change/add in the new model. Note: the data is not validated before creating
the new model: you should trust this data
deep – set to True to make a deep copy of the model
Returns
new model instance
dict(**kwargs: Any) → Dict#
Return a dictionary of the LLM.
generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
get_num_tokens(text: str) → int#
Get the number of tokens present in the text.
get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int#
Get the number of tokens in the message.
get_token_ids(text: str) → List[int]#
Get the token IDs using the tiktoken package.
json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode#
Generate a JSON representation of the model, include and exclude arguments as per dict().
encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().
predict(text: str, *, stop: Optional[Sequence[str]] = None) → str#
Predict text from text.
predict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage#
Predict message from messages.
save(file_path: Union[pathlib.Path, str]) → None#
Save the LLM.
Parameters
file_path – Path to file to save the LLM to.
Example:
.. code-block:: python
llm.save(file_path="path/llm.yaml")
classmethod update_forward_refs(**localns: Any) → None#
Try to update ForwardRefs on fields based on this Model, globalns and localns.
pydantic model langchain.llms.RWKV[source]#
Wrapper around RWKV language models.
To use, you should have the rwkv python package installed, the
pre-trained model file, and the model’s config information.
Example
from langchain.llms import RWKV
model = RWKV(model="./models/rwkv-3b-fp16.bin", strategy="cpu fp32")
# Simplest invocation
response = model("Once upon a time, ")
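A fuller sketch that also supplies the required tokens_path field (both file paths are placeholders for a locally downloaded model and tokenizer file):
from langchain.llms import RWKV
model = RWKV(
    model="./models/rwkv-4-raven-3b.pth",
    tokens_path="./models/20B_tokenizer.json",
    strategy="cpu fp32",
    max_tokens_per_generation=128,
)
response = model("Once upon a time, ")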
Validators
raise_deprecation » all fields
set_verbose » verbose
validate_environment » all fields
field CHUNK_LEN: int = 256#
Batch size for prompt processing.
field max_tokens_per_generation: int = 256#
Maximum number of tokens to generate.
field model: str [Required]#
Path to the pre-trained RWKV model file.
field penalty_alpha_frequency: float = 0.4#
Positive values penalize new tokens based on their existing frequency
in the text so far, decreasing the model’s likelihood to repeat the same
line verbatim.
field penalty_alpha_presence: float = 0.4#
Positive values penalize new tokens based on whether they appear
in the text so far, increasing the model’s likelihood to talk about
new topics.
field rwkv_verbose: bool = True#
Print debug information.
field strategy: str = 'cpu fp32'#
The rwkv strategy string specifying the device and precision to run on, e.g. 'cpu fp32'.
field temperature: float = 1.0#
The temperature to use for sampling.
field tokens_path: str [Required]#
Path to the RWKV tokens file.
field top_p: float = 0.5#
The top-p value to use for sampling.
field verbose: bool [Optional]#
Whether to print out response text.
__call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str#
Check Cache and run the LLM on the given prompt and input.
async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
async apredict(text: str, *, stop: Optional[Sequence[str]] = None) → str#
Predict text from text.
async apredict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage#
Predict message from messages.
classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model#
Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data.
Default values are respected, but no other validation is performed.
Behaves as if Config.extra = ‘allow’ was set since it adds all passed values
copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model#
Duplicate a model, optionally choose which fields to include, exclude and change.
Parameters
include – fields to include in new model
exclude – fields to exclude from new model, as with values this takes precedence over include
update – values to change/add in the new model. Note: the data is not validated before creating
the new model: you should trust this data
deep – set to True to make a deep copy of the model
Returns
new model instance
dict(**kwargs: Any) → Dict#
Return a dictionary of the LLM.
generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
get_num_tokens(text: str) → int#
Get the number of tokens present in the text.
get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int#
Get the number of tokens in the message.
get_token_ids(text: str) → List[int]#
Get the token IDs present in the text.
json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode#
Generate a JSON representation of the model, include and exclude arguments as per dict().
encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().
predict(text: str, *, stop: Optional[Sequence[str]] = None) → str#
Predict text from text.
predict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage#
Predict message from messages.
save(file_path: Union[pathlib.Path, str]) → None#
Save the LLM.
Parameters
file_path – Path to file to save the LLM to.
Example:
.. code-block:: python
llm.save(file_path="path/llm.yaml")
classmethod update_forward_refs(**localns: Any) → None#
Try to update ForwardRefs on fields based on this Model, globalns and localns.
pydantic model langchain.llms.Replicate[source]#
Wrapper around Replicate models.
To use, you should have the replicate python package installed,
and the environment variable REPLICATE_API_TOKEN set with your API token.
You can find your token here: https://replicate.com/account
The model param is required, but any other model parameters can also
be passed in with the format input={model_param: value, …}
Example
from langchain.llms import Replicate
replicate = Replicate(model="stability-ai/stable-diffusion:27b93a2413e7f36cd83da926f3656280b2931564ff050bf9575f1fdf9bcd7478",
input={"image_dimensions": "512x512"})
Validators
build_extra » all fields
raise_deprecation » all fields
set_verbose » verbose
validate_environment » all fields
field verbose: bool [Optional]#
Whether to print out response text.
__call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str#
Check Cache and run the LLM on the given prompt and input.
async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
async apredict(text: str, *, stop: Optional[Sequence[str]] = None) → str#
Predict text from text.
async apredict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage#
Predict message from messages.
classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model#
Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data.
Default values are respected, but no other validation is performed.
Behaves as if Config.extra = ‘allow’ was set since it adds all passed values
copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model#
Duplicate a model, optionally choose which fields to include, exclude and change.
Parameters
include – fields to include in new model
exclude – fields to exclude from new model, as with values this takes precedence over include
update – values to change/add in the new model. Note: the data is not validated before creating
the new model: you should trust this data
deep – set to True to make a deep copy of the model
Returns
new model instance
dict(**kwargs: Any) → Dict#
Return a dictionary of the LLM.
generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
get_num_tokens(text: str) → int#
Get the number of tokens present in the text.
get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int#
Get the number of tokens in the message.
get_token_ids(text: str) → List[int]#
Get the token IDs present in the text.
json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode#
Generate a JSON representation of the model, include and exclude arguments as per dict().
encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().
predict(text: str, *, stop: Optional[Sequence[str]] = None) → str#
Predict text from text.
predict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage#
Predict message from messages.
save(file_path: Union[pathlib.Path, str]) → None#
Save the LLM.
Parameters
file_path – Path to file to save the LLM to.
Example:
.. code-block:: python
llm.save(file_path="path/llm.yaml")
classmethod update_forward_refs(**localns: Any) → None#
Try to update ForwardRefs on fields based on this Model, globalns and localns.
pydantic model langchain.llms.SagemakerEndpoint[source]#
Wrapper around custom Sagemaker Inference Endpoints.
To use, you must supply the endpoint name from your deployed
Sagemaker model & the region where it is deployed.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Sagemaker endpoint.
See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
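A minimal sketch of wiring up a content handler and the endpoint (the endpoint name, region, profile, and the JSON request/response layout are assumptions that depend on how your model was deployed):
import json
from langchain.llms import SagemakerEndpoint
from langchain.llms.sagemaker_endpoint import LLMContentHandler

class ContentHandler(LLMContentHandler):
    content_type = "application/json"
    accepts = "application/json"

    def transform_input(self, prompt: str, model_kwargs: dict) -> bytes:
        # The payload shape depends on the deployed model's expected input
        return json.dumps({"inputs": prompt, "parameters": model_kwargs}).encode("utf-8")

    def transform_output(self, output) -> str:
        # output is the raw response body returned by invoke_endpoint
        response_json = json.loads(output.read().decode("utf-8"))
        return response_json[0]["generated_text"]

se = SagemakerEndpoint(
    endpoint_name="my-llm-endpoint",
    region_name="us-west-2",
    credentials_profile_name="default",
    model_kwargs={"temperature": 0.7},
    content_handler=ContentHandler(),
)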
Validators
raise_deprecation » all fields
set_verbose » verbose
validate_environment » all fields
field content_handler: langchain.llms.sagemaker_endpoint.LLMContentHandler [Required]#
The content handler class that provides an input and
output transform functions to handle formats between LLM
and the endpoint.
field credentials_profile_name: Optional[str] = None#
The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
field endpoint_kwargs: Optional[Dict] = None#
Optional attributes passed to the invoke_endpoint
function. See the boto3 docs for more info:
https://boto3.amazonaws.com/v1/documentation/api/latest/index.html
field endpoint_name: str = ''#
The name of the endpoint from the deployed Sagemaker model.
Must be unique within an AWS Region.
field model_kwargs: Optional[Dict] = None#
Key word arguments to pass to the model.
field region_name: str = ''#
The AWS region where the Sagemaker model is deployed, e.g. us-west-2.
field verbose: bool [Optional]#
Whether to print out response text.
__call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str#
Check Cache and run the LLM on the given prompt and input.
async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
async apredict(text: str, *, stop: Optional[Sequence[str]] = None) → str#
Predict text from text.
async apredict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage#
Predict message from messages.
classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model#
Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data.
Default values are respected, but no other validation is performed.
Behaves as if Config.extra = ‘allow’ was set since it adds all passed values
copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model#
Duplicate a model, optionally choose which fields to include, exclude and change.
Parameters
include – fields to include in new model
exclude – fields to exclude from new model, as with values this takes precedence over include
update – values to change/add in the new model. Note: the data is not validated before creating
the new model: you should trust this data
deep – set to True to make a deep copy of the model
Returns
new model instance
dict(**kwargs: Any) → Dict#
Return a dictionary of the LLM.
generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
get_num_tokens(text: str) → int#
Get the number of tokens present in the text.
get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int#
Get the number of tokens in the message.
get_token_ids(text: str) → List[int]#
Get the token IDs present in the text.
json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode#
Generate a JSON representation of the model, include and exclude arguments as per dict().
encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().
predict(text: str, *, stop: Optional[Sequence[str]] = None) → str#
Predict text from text.
predict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage#
Predict message from messages.
save(file_path: Union[pathlib.Path, str]) → None#
Save the LLM.
Parameters
file_path – Path to file to save the LLM to.
Example:
.. code-block:: python
llm.save(file_path="path/llm.yaml")
classmethod update_forward_refs(**localns: Any) → None#
Try to update ForwardRefs on fields based on this Model, globalns and localns.
pydantic model langchain.llms.SelfHostedHuggingFaceLLM[source]#
Wrapper around HuggingFace Pipeline API to run on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another cloud
like Paperspace, Coreweave, etc.).
To use, you should have the runhouse python package installed.
Only supports text-generation, text2text-generation and summarization for now.
Example using from_model_id:
from langchain.llms import SelfHostedHuggingFaceLLM
import runhouse as rh
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
hf = SelfHostedHuggingFaceLLM(
model_id="google/flan-t5-large", task="text2text-generation",
hardware=gpu
)
Example passing a fn that generates a pipeline (because the pipeline is not serializable):
from langchain.llms import SelfHostedHuggingFaceLLM
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import runhouse as rh
def get_pipeline():
model_id = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
pipe = pipeline(
"text-generation", model=model, tokenizer=tokenizer
)
return pipe
hf = SelfHostedHuggingFaceLLM(
model_load_fn=get_pipeline, model_id="gpt2", hardware=gpu)
Validators
raise_deprecation » all fields
set_verbose » verbose
field device: int = 0#
Device to use for inference. -1 for CPU, 0 for GPU, 1 for second GPU, etc.
field hardware: Any = None#
Remote hardware to send the inference function to.
field inference_fn: Callable = <function _generate_text>#
Inference function to send to the remote hardware.
field load_fn_kwargs: Optional[dict] = None#
Key word arguments to pass to the model load function.
field model_id: str = 'gpt2'#
Hugging Face model_id to load the model.
field model_kwargs: Optional[dict] = None#
Key word arguments to pass to the model.
field model_load_fn: Callable = <function _load_transformer>#
Function to load the model remotely on the server.
field model_reqs: List[str] = ['./', 'transformers', 'torch']#
Requirements to install on the hardware to run inference with the model.
field task: str = 'text-generation'#
Hugging Face task (“text-generation”, “text2text-generation” or
“summarization”).
field verbose: bool [Optional]#
Whether to print out response text.
__call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str#
Check Cache and run the LLM on the given prompt and input.
async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
async apredict(text: str, *, stop: Optional[Sequence[str]] = None) → str#
Predict text from text.
async apredict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage#
Predict message from messages.
classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model#
Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data.
Default values are respected, but no other validation is performed.
Behaves as if Config.extra = ‘allow’ was set since it adds all passed values
copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model#
Duplicate a model, optionally choose which fields to include, exclude and change.
Parameters
include – fields to include in new model
exclude – fields to exclude from new model, as with values this takes precedence over include
update – values to change/add in the new model. Note: the data is not validated before creating
the new model: you should trust this data
deep – set to True to make a deep copy of the model
Returns
new model instance
dict(**kwargs: Any) → Dict#
Return a dictionary of the LLM.
classmethod from_pipeline(pipeline: Any, hardware: Any, model_reqs: Optional[List[str]] = None, device: int = 0, **kwargs: Any) → langchain.llms.base.LLM#
Init the SelfHostedPipeline from a pipeline object or string.
generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
get_num_tokens(text: str) → int#
Get the number of tokens present in the text.
get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int#
Get the number of tokens in the message.
get_token_ids(text: str) → List[int]#
Get the token IDs present in the text.
json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode#
Generate a JSON representation of the model, include and exclude arguments as per dict().
encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().
predict(text: str, *, stop: Optional[Sequence[str]] = None) → str#
Predict text from text.
predict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage#
Predict message from messages.
save(file_path: Union[pathlib.Path, str]) → None#
Save the LLM.
Parameters
file_path – Path to file to save the LLM to.
Example:
.. code-block:: python
llm.save(file_path="path/llm.yaml")
classmethod update_forward_refs(**localns: Any) → None#
Try to update ForwardRefs on fields based on this Model, globalns and localns.
pydantic model langchain.llms.SelfHostedPipeline[source]#
Run model inference on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another
cloud like Paperspace, Coreweave, etc.).
To use, you should have the runhouse python package installed.
Example for custom pipeline and inference functions:
from langchain.llms import SelfHostedPipeline
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import runhouse as rh
def load_pipeline():
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
return pipeline(
"text-generation", model=model, tokenizer=tokenizer,
max_new_tokens=10
)
def inference_fn(pipeline, prompt, stop = None):
return pipeline(prompt)[0]["generated_text"]
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
llm = SelfHostedPipeline(
model_load_fn=load_pipeline,
hardware=gpu,
model_reqs=model_reqs, inference_fn=inference_fn
)
Example for <2GB model (can be serialized and sent directly to the server):
from langchain.llms import SelfHostedPipeline
import runhouse as rh
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
my_model = ...
llm = SelfHostedPipeline.from_pipeline(
pipeline=my_model,
hardware=gpu,
model_reqs=["./", "torch", "transformers"],
)
Example passing model path for larger models:
from langchain.llms import SelfHostedPipeline
import runhouse as rh
import pickle
from transformers import pipeline
generator = pipeline(model="gpt2")
rh.blob(pickle.dumps(generator), path="models/pipeline.pkl"
).save().to(gpu, path="models")
llm = SelfHostedPipeline.from_pipeline(
pipeline="models/pipeline.pkl",
hardware=gpu,
model_reqs=["./", "torch", "transformers"],
)
Validators
raise_deprecation » all fields
set_verbose » verbose
field hardware: Any = None#
Remote hardware to send the inference function to.
field inference_fn: Callable = <function _generate_text>#
Inference function to send to the remote hardware.
field load_fn_kwargs: Optional[dict] = None#
Key word arguments to pass to the model load function.
field model_load_fn: Callable [Required]#
Function to load the model remotely on the server.
field model_reqs: List[str] = ['./', 'torch']#
Requirements to install on the hardware to run inference with the model.
field verbose: bool [Optional]#
Whether to print out response text.
__call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str#
Check Cache and run the LLM on the given prompt and input.
async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
async apredict(text: str, *, stop: Optional[Sequence[str]] = None) → str#
Predict text from text.
async apredict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage#
Predict message from messages.
classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model#
Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data.
Default values are respected, but no other validation is performed.
Behaves as if Config.extra = ‘allow’ was set since it adds all passed values
copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model#
Duplicate a model, optionally choose which fields to include, exclude and change.
Parameters
include – fields to include in new model
exclude – fields to exclude from new model, as with values this takes precedence over include
update – values to change/add in the new model. Note: the data is not validated before creating
the new model: you should trust this data
deep – set to True to make a deep copy of the model
Returns
new model instance
dict(**kwargs: Any) → Dict#
Return a dictionary of the LLM.
classmethod from_pipeline(pipeline: Any, hardware: Any, model_reqs: Optional[List[str]] = None, device: int = 0, **kwargs: Any) → langchain.llms.base.LLM[source]#
Init the SelfHostedPipeline from a pipeline object or string.
generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
get_num_tokens(text: str) → int#
Get the number of tokens present in the text.
get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int#
Get the number of tokens in the message.
get_token_ids(text: str) → List[int]#
Get the token IDs present in the text.
json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode#
Generate a JSON representation of the model, include and exclude arguments as per dict().
encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().
predict(text: str, *, stop: Optional[Sequence[str]] = None) → str#
Predict text from text. | https://python.langchain.com/en/latest/reference/modules/llms.html |
479f31c64c6b-157 | Predict text from text.
predict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage#
Predict message from messages.
save(file_path: Union[pathlib.Path, str]) → None#
Save the LLM.
Parameters
file_path – Path to file to save the LLM to.
Example:
.. code-block:: python
llm.save(file_path="path/llm.yaml")
classmethod update_forward_refs(**localns: Any) → None#
Try to update ForwardRefs on fields based on this Model, globalns and localns.
pydantic model langchain.llms.StochasticAI[source]#
Wrapper around StochasticAI large language models.
To use, you should have the environment variable STOCHASTICAI_API_KEY
set with your API key.
Example
from langchain.llms import StochasticAI
stochasticai = StochasticAI(api_url="")
Validators
build_extra » all fields
raise_deprecation » all fields
set_verbose » verbose
validate_environment » all fields
field api_url: str = ''#
The StochasticAI API URL to use.
field model_kwargs: Dict[str, Any] [Optional]#
Holds any model parameters valid for create call not
explicitly specified.
field verbose: bool [Optional]#
Whether to print out response text.
__call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str#
Check Cache and run the LLM on the given prompt and input. | https://python.langchain.com/en/latest/reference/modules/llms.html |
479f31c64c6b-158 | Check Cache and run the LLM on the given prompt and input.
async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
async apredict(text: str, *, stop: Optional[Sequence[str]] = None) → str#
Predict text from text.
async apredict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage#
Predict message from messages.
classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model#
Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data.
Default values are respected, but no other validation is performed.
Behaves as if Config.extra = ‘allow’ was set since it adds all passed values
copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model#
Duplicate a model, optionally choose which fields to include, exclude and change.
Parameters
include – fields to include in new model | https://python.langchain.com/en/latest/reference/modules/llms.html |
479f31c64c6b-159 | Parameters
include – fields to include in new model
exclude – fields to exclude from new model, as with values this takes precedence over include
update – values to change/add in the new model. Note: the data is not validated before creating
the new model: you should trust this data
deep – set to True to make a deep copy of the model
Returns
new model instance
dict(**kwargs: Any) → Dict#
Return a dictionary of the LLM.
generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
get_num_tokens(text: str) → int#
Get the number of tokens present in the text.
get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int#
Get the number of tokens in the message.
get_token_ids(text: str) → List[int]#
Get the token IDs present in the text.
479f31c64c6b-160 | Get the token present in the text.
json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode#
Generate a JSON representation of the model, include and exclude arguments as per dict().
encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().
predict(text: str, *, stop: Optional[Sequence[str]] = None) → str#
Predict text from text.
predict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage#
Predict message from messages.
save(file_path: Union[pathlib.Path, str]) → None#
Save the LLM.
Parameters
file_path – Path to file to save the LLM to.
Example:
.. code-block:: python
llm.save(file_path="path/llm.yaml")
classmethod update_forward_refs(**localns: Any) → None#
Try to update ForwardRefs on fields based on this Model, globalns and localns.
pydantic model langchain.llms.VertexAI[source]#
Wrapper around Google Vertex AI large language models.
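Example (a minimal sketch added for illustration, not from the original reference; it assumes the google-cloud-aiplatform package is installed and that Google Cloud application default credentials are configured)
from langchain.llms import VertexAI
llm = VertexAI(temperature=0.0, max_output_tokens=128)
print(llm("What day comes after Friday?"))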
Validators
raise_deprecation » all fields
set_verbose » verbose
validate_environment » all fields
field credentials: Any = None#
The default custom credentials (google.auth.credentials.Credentials) to use
field location: str = 'us-central1'# | https://python.langchain.com/en/latest/reference/modules/llms.html |
479f31c64c6b-161 | field location: str = 'us-central1'#
The default location to use when making API calls.
field max_output_tokens: int = 128#
Token limit determines the maximum amount of text output from one prompt.
field project: Optional[str] = None#
The default GCP project to use when making Vertex API calls.
field temperature: float = 0.0#
Sampling temperature, it controls the degree of randomness in token selection.
field top_k: int = 40#
How the model selects tokens for output: the next token is selected from among the top_k most probable tokens.
field top_p: float = 0.95#
Tokens are selected from most probable to least until the sum of their probabilities equals the top_p value.
field tuned_model_name: Optional[str] = None#
The name of a tuned model, if it’s provided, model_name is ignored.
field verbose: bool [Optional]#
Whether to print out response text.
__call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str#
Check Cache and run the LLM on the given prompt and input.
async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult. | https://python.langchain.com/en/latest/reference/modules/llms.html |
479f31c64c6b-162 | Take in a list of prompt values and return an LLMResult.
async apredict(text: str, *, stop: Optional[Sequence[str]] = None) → str#
Predict text from text.
async apredict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage#
Predict message from messages.
classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model#
Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data.
Default values are respected, but no other validation is performed.
Behaves as if Config.extra = ‘allow’ was set since it adds all passed values
copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model#
Duplicate a model, optionally choose which fields to include, exclude and change.
Parameters
include – fields to include in new model
exclude – fields to exclude from new model, as with values this takes precedence over include
update – values to change/add in the new model. Note: the data is not validated before creating
the new model: you should trust this data
deep – set to True to make a deep copy of the model
Returns
new model instance
dict(**kwargs: Any) → Dict#
Return a dictionary of the LLM.
generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input. | https://python.langchain.com/en/latest/reference/modules/llms.html |
479f31c64c6b-163 | Run the LLM on the given prompt and input.
generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
get_num_tokens(text: str) → int#
Get the number of tokens present in the text.
get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int#
Get the number of tokens in the message.
get_token_ids(text: str) → List[int]#
Get the token IDs present in the text.
json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode#
Generate a JSON representation of the model, include and exclude arguments as per dict().
encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().
predict(text: str, *, stop: Optional[Sequence[str]] = None) → str#
Predict text from text.
predict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage#
Predict message from messages.
save(file_path: Union[pathlib.Path, str]) → None#
Save the LLM.
Parameters | https://python.langchain.com/en/latest/reference/modules/llms.html |
479f31c64c6b-164 | Save the LLM.
Parameters
file_path – Path to file to save the LLM to.
Example:
.. code-block:: python
llm.save(file_path="path/llm.yaml")
classmethod update_forward_refs(**localns: Any) → None#
Try to update ForwardRefs on fields based on this Model, globalns and localns.
pydantic model langchain.llms.Writer[source]#
Wrapper around Writer large language models.
To use, you should have the environment variable WRITER_API_KEY and
WRITER_ORG_ID set with your API key and organization ID respectively.
Example
from langchain import Writer
writer = Writer(model_id="palmyra-base")
Validators
raise_deprecation » all fields
set_verbose » verbose
validate_environment » all fields
field base_url: Optional[str] = None#
Base url to use, if None decides based on model name.
field best_of: Optional[int] = None#
Generates this many completions server-side and returns the “best”.
field logprobs: bool = False#
Whether to return log probabilities.
field max_tokens: Optional[int] = None#
Maximum number of tokens to generate.
field min_tokens: Optional[int] = None#
Minimum number of tokens to generate.
field model_id: str = 'palmyra-instruct'#
Model name to use.
field n: Optional[int] = None#
How many completions to generate.
field presence_penalty: Optional[float] = None#
Penalizes repeated tokens regardless of frequency.
field repetition_penalty: Optional[float] = None#
Penalizes repeated tokens according to frequency.
field stop: Optional[List[str]] = None#
Sequences when completion generation will stop.
field temperature: Optional[float] = None#
What sampling temperature to use. | https://python.langchain.com/en/latest/reference/modules/llms.html |
479f31c64c6b-165 | field temperature: Optional[float] = None#
What sampling temperature to use.
field top_p: Optional[float] = None#
Total probability mass of tokens to consider at each step.
field verbose: bool [Optional]#
Whether to print out response text.
field writer_api_key: Optional[str] = None#
Writer API key.
field writer_org_id: Optional[str] = None#
Writer organization ID.
__call__(prompt: str, stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → str#
Check Cache and run the LLM on the given prompt and input.
async agenerate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
async agenerate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
async apredict(text: str, *, stop: Optional[Sequence[str]] = None) → str#
Predict text from text.
async apredict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage#
Predict message from messages.
classmethod construct(_fields_set: Optional[SetStr] = None, **values: Any) → Model# | https://python.langchain.com/en/latest/reference/modules/llms.html |
479f31c64c6b-166 | Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data.
Default values are respected, but no other validation is performed.
Behaves as if Config.extra = ‘allow’ was set since it adds all passed values
copy(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, update: Optional[DictStrAny] = None, deep: bool = False) → Model#
Duplicate a model, optionally choose which fields to include, exclude and change.
Parameters
include – fields to include in new model
exclude – fields to exclude from new model, as with values this takes precedence over include
update – values to change/add in the new model. Note: the data is not validated before creating
the new model: you should trust this data
deep – set to True to make a deep copy of the model
Returns
new model instance
dict(**kwargs: Any) → Dict#
Return a dictionary of the LLM.
generate(prompts: List[str], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Run the LLM on the given prompt and input.
generate_prompt(prompts: List[langchain.schema.PromptValue], stop: Optional[List[str]] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None) → langchain.schema.LLMResult#
Take in a list of prompt values and return an LLMResult.
get_num_tokens(text: str) → int#
Get the number of tokens present in the text. | https://python.langchain.com/en/latest/reference/modules/llms.html |
479f31c64c6b-167 | Get the number of tokens present in the text.
get_num_tokens_from_messages(messages: List[langchain.schema.BaseMessage]) → int#
Get the number of tokens in the message.
get_token_ids(text: str) → List[int]#
Get the token IDs present in the text.
json(*, include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None, by_alias: bool = False, skip_defaults: Optional[bool] = None, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, encoder: Optional[Callable[[Any], Any]] = None, models_as_dict: bool = True, **dumps_kwargs: Any) → unicode#
Generate a JSON representation of the model, include and exclude arguments as per dict().
encoder is an optional function to supply as default to json.dumps(), other arguments as per json.dumps().
predict(text: str, *, stop: Optional[Sequence[str]] = None) → str#
Predict text from text.
predict_messages(messages: List[langchain.schema.BaseMessage], *, stop: Optional[Sequence[str]] = None) → langchain.schema.BaseMessage#
Predict message from messages.
save(file_path: Union[pathlib.Path, str]) → None#
Save the LLM.
Parameters
file_path – Path to file to save the LLM to.
Example:
.. code-block:: python
llm.save(file_path="path/llm.yaml")
classmethod update_forward_refs(**localns: Any) → None#
Try to update ForwardRefs on fields based on this Model, globalns and localns.
f0475204cd47-0 | Utilities#
General utilities.
pydantic model langchain.utilities.ApifyWrapper[source]#
Wrapper around Apify.
To use, you should have the apify-client python package installed,
and the environment variable APIFY_API_TOKEN set with your API key, or pass
apify_api_token as a named parameter to the constructor.
field apify_client: Any = None#
field apify_client_async: Any = None#
async acall_actor(actor_id: str, run_input: Dict, dataset_mapping_function: Callable[[Dict], langchain.schema.Document], *, build: Optional[str] = None, memory_mbytes: Optional[int] = None, timeout_secs: Optional[int] = None) → langchain.document_loaders.apify_dataset.ApifyDatasetLoader[source]#
Run an Actor on the Apify platform and wait for results to be ready.
Parameters
actor_id (str) – The ID or name of the Actor on the Apify platform.
run_input (Dict) – The input object of the Actor that you’re trying to run.
dataset_mapping_function (Callable) – A function that takes a single
dictionary (an Apify dataset item) and converts it to
an instance of the Document class.
build (str, optional) – Optionally specifies the actor build to run.
It can be either a build tag or build number.
memory_mbytes (int, optional) – Optional memory limit for the run,
in megabytes.
timeout_secs (int, optional) – Optional timeout for the run, in seconds.
Returns
A loader that will fetch the records from the Actor run's default dataset.
Return type
ApifyDatasetLoader | https://python.langchain.com/en/latest/reference/modules/utilities.html |
f0475204cd47-1 | Return type
ApifyDatasetLoader
call_actor(actor_id: str, run_input: Dict, dataset_mapping_function: Callable[[Dict], langchain.schema.Document], *, build: Optional[str] = None, memory_mbytes: Optional[int] = None, timeout_secs: Optional[int] = None) → langchain.document_loaders.apify_dataset.ApifyDatasetLoader[source]#
Run an Actor on the Apify platform and wait for results to be ready.
Parameters
actor_id (str) – The ID or name of the Actor on the Apify platform.
run_input (Dict) – The input object of the Actor that you’re trying to run.
dataset_mapping_function (Callable) – A function that takes a single
dictionary (an Apify dataset item) and converts it to an
instance of the Document class.
build (str, optional) – Optionally specifies the actor build to run.
It can be either a build tag or build number.
memory_mbytes (int, optional) – Optional memory limit for the run,
in megabytes.
timeout_secs (int, optional) – Optional timeout for the run, in seconds.
Returns
A loader that will fetch the records from the Actor run's default dataset.
Return type
ApifyDatasetLoader
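Example (an illustrative sketch, not from the original reference; it assumes APIFY_API_TOKEN is set and uses the public website-content-crawler Actor, whose run_input structure is an assumption here)
from langchain.schema import Document
from langchain.utilities import ApifyWrapper

apify = ApifyWrapper()
loader = apify.call_actor(
    actor_id="apify/website-content-crawler",
    run_input={"startUrls": [{"url": "https://python.langchain.com/en/latest/"}]},
    dataset_mapping_function=lambda item: Document(
        page_content=item.get("text", ""), metadata={"source": item.get("url", "")}
    ),
)
docs = loader.load()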
pydantic model langchain.utilities.ArxivAPIWrapper[source]#
Wrapper around ArxivAPI.
To use, you should have the arxiv python package installed.
https://lukasschwab.me/arxiv.py/index.html
This wrapper will use the Arxiv API to conduct searches and
fetch document summaries. By default, it will return the document summaries
of the top-k results.
It limits the Document content by doc_content_chars_max.
Set doc_content_chars_max=None if you don’t want to limit the content size.
Parameters | https://python.langchain.com/en/latest/reference/modules/utilities.html |
f0475204cd47-2 | Set doc_content_chars_max=None if you don’t want to limit the content size.
Parameters
top_k_results – number of the top-scored document used for the arxiv tool
ARXIV_MAX_QUERY_LENGTH – the cut limit on the query used for the arxiv tool.
load_max_docs – a limit to the number of loaded documents
load_all_available_meta –
if True: the metadata of the loaded Documents gets all available meta info (see https://lukasschwab.me/arxiv.py/index.html#Result),
if False: the metadata gets only the most informative fields.
field arxiv_exceptions: Any = None#
field doc_content_chars_max: int = 4000#
field load_all_available_meta: bool = False#
field load_max_docs: int = 100#
field top_k_results: int = 3#
load(query: str) → List[langchain.schema.Document][source]#
Run Arxiv search and get the article texts plus the article meta information.
See https://lukasschwab.me/arxiv.py/index.html#Search
Returns: a list of documents with the document.page_content in text format
run(query: str) → str[source]#
Run Arxiv search and get the article meta information.
See https://lukasschwab.me/arxiv.py/index.html#Search
See https://lukasschwab.me/arxiv.py/index.html#Result
It uses only the most informative fields of article meta information.
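Example (a minimal sketch added for illustration; it assumes the arxiv python package is installed)
from langchain.utilities import ArxivAPIWrapper

arxiv = ArxivAPIWrapper(top_k_results=2, doc_content_chars_max=2000)
summaries = arxiv.run("large language models")   # article metadata as a single string
documents = arxiv.load("large language models")  # full article texts as Documents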
class langchain.utilities.BashProcess(strip_newlines: bool = False, return_err_output: bool = False, persistent: bool = False)[source]#
Executes bash commands and returns the output.
process_output(output: str, command: str) → str[source]#
run(commands: Union[str, List[str]]) → str[source]#
Run commands and return final output. | https://python.langchain.com/en/latest/reference/modules/utilities.html |
f0475204cd47-3 | Run commands and return final output.
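Example (a minimal sketch added for illustration; assumes a POSIX shell is available)
from langchain.utilities import BashProcess

bash = BashProcess(strip_newlines=True)
print(bash.run(["echo hello", "pwd"]))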
pydantic model langchain.utilities.BingSearchAPIWrapper[source]#
Wrapper for Bing Search API.
In order to set this up, follow instructions at:
https://levelup.gitconnected.com/api-tutorial-how-to-use-bing-web-search-api-in-python-4165d5592a7e
field bing_search_url: str [Required]#
field bing_subscription_key: str [Required]#
field k: int = 10#
results(query: str, num_results: int) → List[Dict][source]#
Run query through BingSearch and return metadata.
Parameters
query – The query to search for.
num_results – The number of results to return.
Returns
snippet - The description of the result.
title - The title of the result.
link - The link to the result.
Return type
A list of dictionaries with the following keys
run(query: str) → str[source]#
Run query through BingSearch and parse result.
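Example (an illustrative sketch; the subscription key and endpoint URL are placeholders for your own Bing Search v7 resource)
from langchain.utilities import BingSearchAPIWrapper

search = BingSearchAPIWrapper(
    bing_subscription_key="<your-key>",
    bing_search_url="https://api.bing.microsoft.com/v7.0/search",
    k=3,
)
print(search.run("python langchain"))
metadata = search.results("python langchain", num_results=3)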
pydantic model langchain.utilities.DuckDuckGoSearchAPIWrapper[source]#
Wrapper for DuckDuckGo Search API.
Free and does not require any setup
field k: int = 10#
field max_results: int = 5#
field region: Optional[str] = 'wt-wt'#
field safesearch: str = 'moderate'#
field time: Optional[str] = 'y'#
get_snippets(query: str) → List[str][source]#
Run query through DuckDuckGo and return concatenated results.
results(query: str, num_results: int) → List[Dict[str, str]][source]#
Run query through DuckDuckGo and return metadata.
Parameters
query – The query to search for.
num_results – The number of results to return.
Returns | https://python.langchain.com/en/latest/reference/modules/utilities.html |
f0475204cd47-4 | num_results – The number of results to return.
Returns
snippet - The description of the result.
title - The title of the result.
link - The link to the result.
Return type
A list of dictionaries with the following keys
run(query: str) → str[source]#
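Example (a minimal sketch added for illustration; assumes the duckduckgo-search python package is installed)
from langchain.utilities import DuckDuckGoSearchAPIWrapper

ddg = DuckDuckGoSearchAPIWrapper(max_results=3, region="wt-wt")
print(ddg.run("LangChain documentation"))
snippets = ddg.get_snippets("LangChain documentation")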
pydantic model langchain.utilities.GooglePlacesAPIWrapper[source]#
Wrapper around Google Places API.
To use, you should have the googlemaps python package installed, an API key for the Google Maps platform,
and the environment variable GPLACES_API_KEY set with your API key, or pass gplaces_api_key
as a named parameter to the constructor.
By default, this will return all the results on the input query. You can use the top_k_results argument to limit the number of results.
Example
from langchain import GooglePlacesAPIWrapper
gplaceapi = GooglePlacesAPIWrapper()
field gplaces_api_key: Optional[str] = None#
field top_k_results: Optional[int] = None#
fetch_place_details(place_id: str) → Optional[str][source]#
format_place_details(place_details: Dict[str, Any]) → Optional[str][source]#
run(query: str) → str[source]#
Run Places search and get k number of places that exists that match.
pydantic model langchain.utilities.GoogleSearchAPIWrapper[source]#
Wrapper for Google Search API.
Adapted from: https://stackoverflow.com/questions/37083058/programmatically-searching-google-in-python-using-custom-search
TODO: DOCS for using it
1. Install google-api-python-client
- If you don’t already have a Google account, sign up.
- If you have never created a Google APIs Console project,
read the Managing Projects page and create a project in the Google API Console. | https://python.langchain.com/en/latest/reference/modules/utilities.html |
f0475204cd47-5 | read the Managing Projects page and create a project in the Google API Console.
- Install the library using pip install google-api-python-client
The current version of the library is 2.70.0 at this time
2. To create an API key:
- Navigate to the APIs & Services→Credentials panel in Cloud Console.
- Select Create credentials, then select API key from the drop-down menu.
- The API key created dialog box displays your newly created key.
- You now have an API_KEY
3. Setup Custom Search Engine so you can search the entire web
- Create a custom search engine in this link.
- In Sites to search, add any valid URL (i.e. www.stackoverflow.com).
- That’s all you have to fill up, the rest doesn’t matter.
In the left-side menu, click Edit search engine → {your search engine name}
→ Setup Set Search the entire web to ON. Remove the URL you added from
the list of Sites to search.
- Under Search engine ID you’ll find the search-engine-ID.
4. Enable the Custom Search API
- Navigate to the APIs & Services→Dashboard panel in Cloud Console.
- Click Enable APIs and Services.
- Search for Custom Search API and click on it.
- Click Enable.
URL for it: https://console.cloud.google.com/apis/library/customsearch.googleapis.com
field google_api_key: Optional[str] = None#
field google_cse_id: Optional[str] = None#
field k: int = 10#
field siterestrict: bool = False#
results(query: str, num_results: int) → List[Dict][source]#
Run query through GoogleSearch and return metadata.
Parameters
query – The query to search for.
num_results – The number of results to return.
Returns
snippet - The description of the result. | https://python.langchain.com/en/latest/reference/modules/utilities.html |
f0475204cd47-6 | Returns
snippet - The description of the result.
title - The title of the result.
link - The link to the result.
Return type
A list of dictionaries with the following keys
run(query: str) → str[source]#
Run query through GoogleSearch and parse result.
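Example (an illustrative sketch; assumes google-api-python-client is installed, and the API key and Custom Search Engine ID from the setup steps above are placeholders here)
from langchain.utilities import GoogleSearchAPIWrapper

search = GoogleSearchAPIWrapper(
    google_api_key="<API_KEY>",
    google_cse_id="<search-engine-ID>",
    k=5,
)
print(search.run("LangChain agents"))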
pydantic model langchain.utilities.GoogleSerperAPIWrapper[source]#
Wrapper around the Serper.dev Google Search API.
You can create a free API key at https://serper.dev.
To use, you should have the environment variable SERPER_API_KEY
set with your API key, or pass serper_api_key as a named parameter
to the constructor.
Example
from langchain import GoogleSerperAPIWrapper
google_serper = GoogleSerperAPIWrapper()
field aiosession: Optional[aiohttp.client.ClientSession] = None#
field gl: str = 'us'#
field hl: str = 'en'#
field k: int = 10#
field serper_api_key: Optional[str] = None#
field tbs: Optional[str] = None#
field type: Literal['news', 'search', 'places', 'images'] = 'search'#
async aresults(query: str, **kwargs: Any) → Dict[source]#
Run query through GoogleSearch.
async arun(query: str, **kwargs: Any) → str[source]#
Run query through GoogleSearch and parse result async.
results(query: str, **kwargs: Any) → Dict[source]#
Run query through GoogleSearch.
run(query: str, **kwargs: Any) → str[source]#
Run query through GoogleSearch and parse result.
pydantic model langchain.utilities.GraphQLAPIWrapper[source]#
Wrapper around GraphQL API.
To use, you should have the gql python package installed. | https://python.langchain.com/en/latest/reference/modules/utilities.html |
f0475204cd47-7 | Wrapper around GraphQL API.
To use, you should have the gql python package installed.
This wrapper will use the GraphQL API to conduct queries.
field custom_headers: Optional[Dict[str, str]] = None#
field graphql_endpoint: str [Required]#
run(query: str) → str[source]#
Run a GraphQL query and get the results.
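Example (a minimal sketch added for illustration; assumes the gql package is installed and uses a public demo endpoint purely as an example)
from langchain.utilities import GraphQLAPIWrapper

graphql = GraphQLAPIWrapper(graphql_endpoint="https://countries.trevorblades.com/")
result = graphql.run("{ countries { code name } }")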
pydantic model langchain.utilities.LambdaWrapper[source]#
Wrapper for AWS Lambda SDK.
Docs for using:
pip install boto3
Create a lambda function using the AWS Console or CLI
Run aws configure and enter your AWS credentials
field awslambda_tool_description: Optional[str] = None#
field awslambda_tool_name: Optional[str] = None#
field function_name: Optional[str] = None#
run(query: str) → str[source]#
Invoke Lambda function and parse result.
pydantic model langchain.utilities.MetaphorSearchAPIWrapper[source]#
Wrapper for Metaphor Search API.
field k: int = 10#
field metaphor_api_key: str [Required]#
results(query: str, num_results: int) → List[Dict][source]#
Run query through Metaphor Search and return metadata.
Parameters
query – The query to search for.
num_results – The number of results to return.
Returns
title - The title of the result.
url - The URL of the result.
author - Author of the content, if applicable. Otherwise, None.
date_created - Estimated date created,
in YYYY-MM-DD format. Otherwise, None.
Return type
A list of dictionaries with the following keys
async results_async(query: str, num_results: int) → List[Dict][source]#
Get results from the Metaphor Search API asynchronously.
pydantic model langchain.utilities.OpenWeatherMapAPIWrapper[source]# | https://python.langchain.com/en/latest/reference/modules/utilities.html |
f0475204cd47-8 | pydantic model langchain.utilities.OpenWeatherMapAPIWrapper[source]#
Wrapper for OpenWeatherMap API using PyOWM.
Docs for using:
Go to OpenWeatherMap and sign up for an API key
Save your API KEY into OPENWEATHERMAP_API_KEY env variable
pip install pyowm
field openweathermap_api_key: Optional[str] = None#
field owm: Any = None#
run(location: str) → str[source]#
Get the current weather information for a specified location.
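Example (a minimal sketch added for illustration; assumes pyowm is installed and the key placeholder is replaced, or OPENWEATHERMAP_API_KEY is set in the environment)
from langchain.utilities import OpenWeatherMapAPIWrapper

weather = OpenWeatherMapAPIWrapper(openweathermap_api_key="<your-key>")
print(weather.run("London,GB"))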
pydantic model langchain.utilities.PowerBIDataset[source]#
Create PowerBI engine from dataset ID and credential or token.
Use either the credential or a supplied token to authenticate.
If both are supplied the credential is used to generate a token.
The impersonated_user_name is the UPN of a user to be impersonated.
If the model is not RLS enabled, this will be ignored.
Validators
fix_table_names » table_names
token_or_credential_present » all fields
field aiosession: Optional[aiohttp.ClientSession] = None#
field credential: Optional[TokenCredential] = None#
field dataset_id: str [Required]#
field group_id: Optional[str] = None#
field impersonated_user_name: Optional[str] = None#
field sample_rows_in_table_info: int = 1#
Constraints
exclusiveMinimum = 0
maximum = 10
field schemas: Dict[str, str] [Optional]#
field table_names: List[str] [Required]#
field token: Optional[str] = None#
async aget_table_info(table_names: Optional[Union[List[str], str]] = None) → str[source]#
Get information about specified tables.
async arun(command: str) → Any[source]#
Execute a DAX command and return the result asynchronously. | https://python.langchain.com/en/latest/reference/modules/utilities.html |
f0475204cd47-9 | Execute a DAX command and return the result asynchronously.
get_schemas() → str[source]#
Get the available schemas.
get_table_info(table_names: Optional[Union[List[str], str]] = None) → str[source]#
Get information about specified tables.
get_table_names() → Iterable[str][source]#
Get names of tables available.
run(command: str) → Any[source]#
Execute a DAX command and return a json representing the results.
property headers: Dict[str, str]#
Get the token.
property request_url: str#
Get the request url.
property table_info: str#
Information about all tables in the database.
pydantic model langchain.utilities.PythonREPL[source]#
Simulates a standalone Python REPL.
field globals: Optional[Dict] [Optional] (alias '_globals')#
field locals: Optional[Dict] [Optional] (alias '_locals')#
run(command: str) → str[source]#
Run command with own globals/locals and returns anything printed.
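Example (a minimal sketch added for illustration)
from langchain.utilities import PythonREPL

repl = PythonREPL()
print(repl.run("x = 2 ** 10\nprint(x)"))  # returns whatever the command printed, here "1024"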
pydantic model langchain.utilities.SearxSearchWrapper[source]#
Wrapper for Searx API.
To use you need to provide the searx host by passing the named parameter
searx_host or exporting the environment variable SEARX_HOST.
In some situations you might want to disable SSL verification, for example
if you are running searx locally. You can do this by passing the named parameter
unsecure. You can also pass the host url scheme as http to disable SSL.
Example
from langchain.utilities import SearxSearchWrapper
searx = SearxSearchWrapper(searx_host="http://localhost:8888")
Example with SSL disabled:
from langchain.utilities import SearxSearchWrapper | https://python.langchain.com/en/latest/reference/modules/utilities.html |
f0475204cd47-10 | Example with SSL disabled:
from langchain.utilities import SearxSearchWrapper
# note the unsecure parameter is not needed if you pass the url scheme as
# http
searx = SearxSearchWrapper(searx_host="http://localhost:8888",
unsecure=True)
Validators
disable_ssl_warnings » unsecure
validate_params » all fields
field aiosession: Optional[Any] = None#
field categories: Optional[List[str]] = []#
field engines: Optional[List[str]] = []#
field headers: Optional[dict] = None#
field k: int = 10#
field params: dict [Optional]#
field query_suffix: Optional[str] = ''#
field searx_host: str = ''#
field unsecure: bool = False#
async aresults(query: str, num_results: int, engines: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → List[Dict][source]#
Asynchronously query with json results.
Uses aiohttp. See results for more info.
async arun(query: str, engines: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → str[source]#
Asynchronous version of run.
results(query: str, num_results: int, engines: Optional[List[str]] = None, categories: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → List[Dict][source]#
Run query through Searx API and returns the results with metadata.
Parameters
query – The query to search for.
query_suffix – Extra suffix appended to the query.
num_results – Limit the number of results to return.
engines – List of engines to use for the query. | https://python.langchain.com/en/latest/reference/modules/utilities.html |
f0475204cd47-11 | engines – List of engines to use for the query.
categories – List of categories to use for the query.
**kwargs – extra parameters to pass to the searx API.
Returns
{snippet: The description of the result.
title: The title of the result.
link: The link to the result.
engines: The engines used for the result.
category: Searx category of the result.
}
Return type
Dict with the following keys
run(query: str, engines: Optional[List[str]] = None, categories: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → str[source]#
Run query through Searx API and parse results.
You can pass any other params to the searx query API.
Parameters
query – The query to search for.
query_suffix – Extra suffix appended to the query.
engines – List of engines to use for the query.
categories – List of categories to use for the query.
**kwargs – extra parameters to pass to the searx API.
Returns
The result of the query.
Return type
str
Raises
ValueError – If an error occurred with the query.
Example
This will make a query to the qwant engine:
from langchain.utilities import SearxSearchWrapper
searx = SearxSearchWrapper(searx_host="http://my.searx.host")
searx.run("what is the weather in France ?", engine="qwant")
# the same result can be achieved using the `!` syntax of searx
# to select the engine using `query_suffix`
searx.run("what is the weather in France ?", query_suffix="!qwant")
pydantic model langchain.utilities.SerpAPIWrapper[source]# | https://python.langchain.com/en/latest/reference/modules/utilities.html |
f0475204cd47-12 | pydantic model langchain.utilities.SerpAPIWrapper[source]#
Wrapper around SerpAPI.
To use, you should have the google-search-results python package installed,
and the environment variable SERPAPI_API_KEY set with your API key, or pass
serpapi_api_key as a named parameter to the constructor.
Example
from langchain import SerpAPIWrapper
serpapi = SerpAPIWrapper()
field aiosession: Optional[aiohttp.client.ClientSession] = None#
field params: dict = {'engine': 'google', 'gl': 'us', 'google_domain': 'google.com', 'hl': 'en'}#
field serpapi_api_key: Optional[str] = None#
async aresults(query: str) → dict[source]#
Use aiohttp to run query through SerpAPI and return the results async.
async arun(query: str, **kwargs: Any) → str[source]#
Run query through SerpAPI and parse result async.
get_params(query: str) → Dict[str, str][source]#
Get parameters for SerpAPI.
results(query: str) → dict[source]#
Run query through SerpAPI and return the raw result.
run(query: str, **kwargs: Any) → str[source]#
Run query through SerpAPI and parse result.
class langchain.utilities.SparkSQL(spark_session: Optional[SparkSession] = None, catalog: Optional[str] = None, schema: Optional[str] = None, ignore_tables: Optional[List[str]] = None, include_tables: Optional[List[str]] = None, sample_rows_in_table_info: int = 3)[source]#
classmethod from_uri(database_uri: str, engine_args: Optional[dict] = None, **kwargs: Any) → langchain.utilities.spark_sql.SparkSQL[source]# | https://python.langchain.com/en/latest/reference/modules/utilities.html |
f0475204cd47-13 | Creating a remote Spark Session via Spark connect.
For example: SparkSQL.from_uri(“sc://localhost:15002”)
get_table_info(table_names: Optional[List[str]] = None) → str[source]#
get_table_info_no_throw(table_names: Optional[List[str]] = None) → str[source]#
Get information about specified tables.
Follows best practices as specified in: Rajkumar et al, 2022
(https://arxiv.org/abs/2204.00498)
If sample_rows_in_table_info, the specified number of sample rows will be
appended to each table description. This can increase performance as
demonstrated in the paper.
get_usable_table_names() → Iterable[str][source]#
Get names of tables available.
run(command: str, fetch: str = 'all') → str[source]#
run_no_throw(command: str, fetch: str = 'all') → str[source]#
Execute a SQL command and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
If the statement throws an error, the error message is returned.
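Example (an illustrative sketch; assumes pyspark is installed and a Spark Connect server is reachable at the address shown)
from langchain.utilities import SparkSQL

spark_sql = SparkSQL.from_uri("sc://localhost:15002")
print(spark_sql.get_usable_table_names())
print(spark_sql.run("SELECT 1 AS sanity_check"))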
pydantic model langchain.utilities.TextRequestsWrapper[source]#
Lightweight wrapper around requests library.
The main purpose of this wrapper is to always return a text output.
field aiosession: Optional[aiohttp.client.ClientSession] = None#
field headers: Optional[Dict[str, str]] = None#
async adelete(url: str, **kwargs: Any) → str[source]#
DELETE the URL and return the text asynchronously.
async aget(url: str, **kwargs: Any) → str[source]#
GET the URL and return the text asynchronously. | https://python.langchain.com/en/latest/reference/modules/utilities.html |
f0475204cd47-14 | GET the URL and return the text asynchronously.
async apatch(url: str, data: Dict[str, Any], **kwargs: Any) → str[source]#
PATCH the URL and return the text asynchronously.
async apost(url: str, data: Dict[str, Any], **kwargs: Any) → str[source]#
POST to the URL and return the text asynchronously.
async aput(url: str, data: Dict[str, Any], **kwargs: Any) → str[source]#
PUT the URL and return the text asynchronously.
delete(url: str, **kwargs: Any) → str[source]#
DELETE the URL and return the text.
get(url: str, **kwargs: Any) → str[source]#
GET the URL and return the text.
patch(url: str, data: Dict[str, Any], **kwargs: Any) → str[source]#
PATCH the URL and return the text.
post(url: str, data: Dict[str, Any], **kwargs: Any) → str[source]#
POST to the URL and return the text.
put(url: str, data: Dict[str, Any], **kwargs: Any) → str[source]#
PUT the URL and return the text.
property requests: langchain.requests.Requests#
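Example (a minimal sketch added for illustration; the URL and headers are placeholders)
from langchain.utilities import TextRequestsWrapper

requests_wrapper = TextRequestsWrapper(headers={"User-Agent": "langchain-example"})
page_text = requests_wrapper.get("https://python.langchain.com/en/latest/")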
pydantic model langchain.utilities.TwilioAPIWrapper[source]#
SMS client using Twilio.
To use, you should have the twilio python package installed,
and the environment variables TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN, and
TWILIO_FROM_NUMBER, or pass account_sid, auth_token, and from_number as
named parameters to the constructor.
Example
from langchain.utilities.twilio import TwilioAPIWrapper
twilio = TwilioAPIWrapper(
account_sid="ACxxx",
auth_token="xxx", | https://python.langchain.com/en/latest/reference/modules/utilities.html |
f0475204cd47-15 | account_sid="ACxxx",
auth_token="xxx",
from_number="+10123456789"
)
twilio.run('test', '+12484345508')
field account_sid: Optional[str] = None#
Twilio account string identifier.
field auth_token: Optional[str] = None#
Twilio auth token.
field from_number: Optional[str] = None#
A Twilio phone number in [E.164](https://www.twilio.com/docs/glossary/what-e164)
format, an
[alphanumeric sender ID](https://www.twilio.com/docs/sms/send-messages#use-an-alphanumeric-sender-id),
or a [Channel Endpoint address](https://www.twilio.com/docs/sms/channels#channel-addresses)
that is enabled for the type of message you want to send. Phone numbers or
[short codes](https://www.twilio.com/docs/sms/api/short-code) purchased from
Twilio also work here. You cannot, for example, spoof messages from a private
cell phone number. If you are using messaging_service_sid, this parameter
must be empty.
run(body: str, to: str) → str[source]#
Run body through Twilio and respond with message sid.
Parameters
body – The text of the message you want to send. Can be up to 1,600
characters in length.
to – The destination phone number in
[E.164](https://www.twilio.com/docs/glossary/what-e164) format for
SMS/MMS or
[Channel user address](https://www.twilio.com/docs/sms/channels#channel-addresses)
for other 3rd-party channels.
pydantic model langchain.utilities.WikipediaAPIWrapper[source]#
Wrapper around WikipediaAPI.
To use, you should have the wikipedia python package installed. | https://python.langchain.com/en/latest/reference/modules/utilities.html |
f0475204cd47-16 | Wrapper around WikipediaAPI.
To use, you should have the wikipedia python package installed.
This wrapper will use the Wikipedia API to conduct searches and
fetch page summaries. By default, it will return the page summaries
of the top-k results.
It limits the Document content by doc_content_chars_max.
field doc_content_chars_max: int = 4000#
field lang: str = 'en'#
field load_all_available_meta: bool = False#
field top_k_results: int = 3#
load(query: str) → List[langchain.schema.Document][source]#
Run Wikipedia search and get the article text plus the meta information.
Returns: a list of documents.
run(query: str) → str[source]#
Run Wikipedia search and get page summaries.
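Example (a minimal sketch added for illustration; assumes the wikipedia python package is installed)
from langchain.utilities import WikipediaAPIWrapper

wiki = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=1000)
print(wiki.run("Large language model"))
documents = wiki.load("Large language model")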
pydantic model langchain.utilities.WolframAlphaAPIWrapper[source]#
Wrapper for Wolfram Alpha.
Docs for using:
Go to wolfram alpha and sign up for a developer account
Create an app and get your APP ID
Save your APP ID into WOLFRAM_ALPHA_APPID env variable
pip install wolframalpha
field wolfram_alpha_appid: Optional[str] = None#
run(query: str) → str[source]#
Run query through WolframAlpha and parse result.
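Example (a minimal sketch added for illustration; assumes the wolframalpha package is installed and the app ID placeholder is replaced, or WOLFRAM_ALPHA_APPID is set in the environment)
from langchain.utilities import WolframAlphaAPIWrapper

wolfram = WolframAlphaAPIWrapper(wolfram_alpha_appid="<your-app-id>")
print(wolfram.run("integrate x^2 from 0 to 3"))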
ee26bc64bd63-0 | Experimental Modules#
Contents
Autonomous Agents
Generative Agents
This module contains experimental modules and reproductions of existing work using LangChain primitives.
Autonomous Agents#
Here, we document the BabyAGI and AutoGPT classes from the langchain.experimental module.
class langchain.experimental.BabyAGI(*, memory: Optional[langchain.schema.BaseMemory] = None, callbacks: Optional[Union[List[langchain.callbacks.base.BaseCallbackHandler], langchain.callbacks.base.BaseCallbackManager]] = None, callback_manager: Optional[langchain.callbacks.base.BaseCallbackManager] = None, verbose: bool = None, task_list: collections.deque = None, task_creation_chain: langchain.chains.base.Chain, task_prioritization_chain: langchain.chains.base.Chain, execution_chain: langchain.chains.base.Chain, task_id_counter: int = 1, vectorstore: langchain.vectorstores.base.VectorStore, max_iterations: Optional[int] = None)[source]#
Controller model for the BabyAGI agent.
model Config[source]#
Configuration for this pydantic object.
arbitrary_types_allowed = True#
execute_task(objective: str, task: str, k: int = 5) → str[source]#
Execute a task.
classmethod from_llm(llm: langchain.base_language.BaseLanguageModel, vectorstore: langchain.vectorstores.base.VectorStore, verbose: bool = False, task_execution_chain: Optional[langchain.chains.base.Chain] = None, **kwargs: Dict[str, Any]) → langchain.experimental.autonomous_agents.baby_agi.baby_agi.BabyAGI[source]#
Initialize the BabyAGI Controller.
get_next_task(result: str, task_description: str, objective: str) → List[Dict][source]#
Get the next task. | https://python.langchain.com/en/latest/reference/modules/experimental.html |
ee26bc64bd63-1 | Get the next task.
property input_keys: List[str]#
Input keys this chain expects.
property output_keys: List[str]#
Output keys this chain expects.
prioritize_tasks(this_task_id: int, objective: str) → List[Dict][source]#
Prioritize tasks.
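Example (an illustrative sketch modeled on the reference BabyAGI notebook, not part of the original page; it assumes an OpenAI API key, the faiss-cpu package, and that "objective" is the chain's input key)
from langchain.embeddings import OpenAIEmbeddings
from langchain.experimental import BabyAGI
from langchain.llms import OpenAI
from langchain.vectorstores import FAISS

vectorstore = FAISS.from_texts(["placeholder"], OpenAIEmbeddings())
baby_agi = BabyAGI.from_llm(
    llm=OpenAI(temperature=0),
    vectorstore=vectorstore,
    verbose=False,
    max_iterations=2,
)
baby_agi({"objective": "Write a short weather report for San Francisco"})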
class langchain.experimental.AutoGPT(ai_name: str, memory: langchain.vectorstores.base.VectorStoreRetriever, chain: langchain.chains.llm.LLMChain, output_parser: langchain.experimental.autonomous_agents.autogpt.output_parser.BaseAutoGPTOutputParser, tools: List[langchain.tools.base.BaseTool], feedback_tool: Optional[langchain.tools.human.tool.HumanInputRun] = None)[source]#
Agent class for interacting with Auto-GPT.
Generative Agents#
Here, we document the GenerativeAgent and GenerativeAgentMemory classes from the langchain.experimental module.
class langchain.experimental.GenerativeAgent(*, name: str, age: Optional[int] = None, traits: str = 'N/A', status: str, memory: langchain.experimental.generative_agents.memory.GenerativeAgentMemory, llm: langchain.base_language.BaseLanguageModel, verbose: bool = False, summary: str = '', summary_refresh_seconds: int = 3600, last_refreshed: datetime.datetime = None, daily_summaries: List[str] = None)[source]#
A character with memory and innate characteristics.
model Config[source]#
Configuration for this pydantic object.
arbitrary_types_allowed = True#
field age: Optional[int] = None#
The optional age of the character.
field daily_summaries: List[str] [Optional]#
Summary of the events in the plan that the agent took. | https://python.langchain.com/en/latest/reference/modules/experimental.html |
ee26bc64bd63-2 | Summary of the events in the plan that the agent took.
generate_dialogue_response(observation: str, now: Optional[datetime.datetime] = None) → Tuple[bool, str][source]#
React to a given observation.
generate_reaction(observation: str, now: Optional[datetime.datetime] = None) → Tuple[bool, str][source]#
React to a given observation.
get_full_header(force_refresh: bool = False, now: Optional[datetime.datetime] = None) → str[source]#
Return a full header of the agent’s status, summary, and current time.
get_summary(force_refresh: bool = False, now: Optional[datetime.datetime] = None) → str[source]#
Return a descriptive summary of the agent.
field last_refreshed: datetime.datetime [Optional]#
The last time the character’s summary was regenerated.
field llm: langchain.base_language.BaseLanguageModel [Required]#
The underlying language model.
field memory: langchain.experimental.generative_agents.memory.GenerativeAgentMemory [Required]#
The memory object that combines relevance, recency, and ‘importance’.
field name: str [Required]#
The character’s name.
field status: str [Required]#
The character's current status.
summarize_related_memories(observation: str) → str[source]#
Summarize memories that are most relevant to an observation.
field summary: str = ''#
Stateful self-summary generated via reflection on the character’s memory.
field summary_refresh_seconds: int = 3600#
How frequently to re-generate the summary.
field traits: str = 'N/A'#
Permanent traits to ascribe to the character. | https://python.langchain.com/en/latest/reference/modules/experimental.html |
ee26bc64bd63-3 | field traits: str = 'N/A'#
Permanent traits to ascribe to the character.
class langchain.experimental.GenerativeAgentMemory(*, llm: langchain.base_language.BaseLanguageModel, memory_retriever: langchain.retrievers.time_weighted_retriever.TimeWeightedVectorStoreRetriever, verbose: bool = False, reflection_threshold: Optional[float] = None, current_plan: List[str] = [], importance_weight: float = 0.15, aggregate_importance: float = 0.0, max_tokens_limit: int = 1200, queries_key: str = 'queries', most_recent_memories_token_key: str = 'recent_memories_token', add_memory_key: str = 'add_memory', relevant_memories_key: str = 'relevant_memories', relevant_memories_simple_key: str = 'relevant_memories_simple', most_recent_memories_key: str = 'most_recent_memories', now_key: str = 'now', reflecting: bool = False)[source]#
add_memory(memory_content: str, now: Optional[datetime.datetime] = None) → List[str][source]#
Add an observation or memory to the agent’s memory.
field aggregate_importance: float = 0.0#
Track the sum of the ‘importance’ of recent memories.
Triggers reflection when it reaches reflection_threshold.
clear() → None[source]#
Clear memory contents.
field current_plan: List[str] = []#
The current plan of the agent.
fetch_memories(observation: str, now: Optional[datetime.datetime] = None) → List[langchain.schema.Document][source]#
Fetch related memories.
field importance_weight: float = 0.15#
How much weight to assign the memory importance.
field llm: langchain.base_language.BaseLanguageModel [Required]#
The core language model. | https://python.langchain.com/en/latest/reference/modules/experimental.html |
ee26bc64bd63-4 | The core language model.
load_memory_variables(inputs: Dict[str, Any]) → Dict[str, str][source]#
Return key-value pairs given the text input to the chain.
field memory_retriever: langchain.retrievers.time_weighted_retriever.TimeWeightedVectorStoreRetriever [Required]#
The retriever to fetch related memories.
property memory_variables: List[str]#
Input keys this memory class will load dynamically.
pause_to_reflect(now: Optional[datetime.datetime] = None) → List[str][source]#
Reflect on recent observations and generate ‘insights’.
field reflection_threshold: Optional[float] = None#
When aggregate_importance exceeds reflection_threshold, stop to reflect.
save_context(inputs: Dict[str, Any], outputs: Dict[str, Any]) → None[source]#
Save the context of this model run to memory.
1abd6a2753af-0 | Document Transformers#
Transform documents
pydantic model langchain.document_transformers.EmbeddingsRedundantFilter[source]#
Filter that drops redundant documents by comparing their embeddings.
field embeddings: langchain.embeddings.base.Embeddings [Required]#
Embeddings to use for embedding document contents.
field similarity_fn: Callable = <function cosine_similarity>#
Similarity function for comparing documents. Function expected to take as input
two matrices (List[List[float]]) and return a matrix of scores where higher values
indicate greater similarity.
field similarity_threshold: float = 0.95#
Threshold for determining when two documents are similar enough
to be considered redundant.
async atransform_documents(documents: Sequence[langchain.schema.Document], **kwargs: Any) → Sequence[langchain.schema.Document][source]#
Asynchronously transform a list of documents.
transform_documents(documents: Sequence[langchain.schema.Document], **kwargs: Any) → Sequence[langchain.schema.Document][source]#
Filter down documents.
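Example (an illustrative sketch; the OpenAIEmbeddings model is an assumption here and requires an OpenAI API key, any Embeddings implementation can be used)
from langchain.document_transformers import EmbeddingsRedundantFilter
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema import Document

docs = [
    Document(page_content="LangChain helps you build LLM applications."),
    Document(page_content="LangChain helps you build LLM applications!"),
    Document(page_content="FAISS is a library for vector similarity search."),
]
redundant_filter = EmbeddingsRedundantFilter(embeddings=OpenAIEmbeddings())
unique_docs = redundant_filter.transform_documents(docs)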
langchain.document_transformers.get_stateful_documents(documents: Sequence[langchain.schema.Document]) → Sequence[langchain.document_transformers._DocumentWithState][source]#
5b25cabdd8f4-0 | YouTube#
Contents
⛓️Official LangChain YouTube channel⛓️
Introduction to LangChain with Harrison Chase, creator of LangChain
Videos (sorted by views)
This is a collection of LangChain videos on YouTube.
⛓️Official LangChain YouTube channel⛓️#
Introduction to LangChain with Harrison Chase, creator of LangChain#
Building the Future with LLMs, LangChain, & Pinecone by Pinecone
LangChain and Weaviate with Harrison Chase and Bob van Luijt - Weaviate Podcast #36 by Weaviate • Vector Database
LangChain Demo + Q&A with Harrison Chase by Full Stack Deep Learning
LangChain Agents: Build Personal Assistants For Your Data (Q&A with Harrison Chase and Mayo Oshin) by Chat with data
⛓️ LangChain “Agents in Production” Webinar by LangChain
Videos (sorted by views)#
Building AI LLM Apps with LangChain (and more?) - LIVE STREAM by Nicholas Renotte
First look - ChatGPT + WolframAlpha (GPT-3.5 and Wolfram|Alpha via LangChain by James Weaver) by Dr Alan D. Thompson
LangChain explained - The hottest new Python framework by AssemblyAI
Chatbot with INFINITE MEMORY using OpenAI & Pinecone - GPT-3, Embeddings, ADA, Vector DB, Semantic by David Shapiro ~ AI
LangChain for LLMs is… basically just an Ansible playbook by David Shapiro ~ AI
Build your own LLM Apps with LangChain & GPT-Index by 1littlecoder
BabyAGI - New System of Autonomous AI Agents with LangChain by 1littlecoder
Run BabyAGI with Langchain Agents (with Python Code) by 1littlecoder | https://python.langchain.com/en/latest/additional_resources/youtube.html |
5b25cabdd8f4-1 | Run BabyAGI with Langchain Agents (with Python Code) by 1littlecoder
How to Use Langchain With Zapier | Write and Send Email with GPT-3 | OpenAI API Tutorial by StarMorph AI
Use Your Locally Stored Files To Get Response From GPT - OpenAI | Langchain | Python by Shweta Lodha
Langchain JS | How to Use GPT-3, GPT-4 to Reference your own Data | OpenAI Embeddings Intro by StarMorph AI
The easiest way to work with large language models | Learn LangChain in 10min by Sophia Yang
4 Autonomous AI Agents: “Westworld” simulation BabyAGI, AutoGPT, Camel, LangChain by Sophia Yang
AI CAN SEARCH THE INTERNET? Langchain Agents + OpenAI ChatGPT by tylerwhatsgood
Query Your Data with GPT-4 | Embeddings, Vector Databases | Langchain JS Knowledgebase by StarMorph AI
Weaviate + LangChain for LLM apps presented by Erika Cardenas by Weaviate • Vector Database
Langchain Overview — How to Use Langchain & ChatGPT by Python In Office
Custom langchain Agent & Tools with memory. Turn any Python function into langchain tool with Gpt 3 by echohive
LangChain: Run Language Models Locally - Hugging Face Models by Prompt Engineering
ChatGPT with any YouTube video using langchain and chromadb by echohive
How to Talk to a PDF using LangChain and ChatGPT by Automata Learning Lab
Langchain Document Loaders Part 1: Unstructured Files by Merk
LangChain - Prompt Templates (what all the best prompt engineers use) by Nick Daigler
LangChain. Crear aplicaciones Python impulsadas por GPT by Jesús Conde | https://python.langchain.com/en/latest/additional_resources/youtube.html |
5b25cabdd8f4-2 | LangChain. Crear aplicaciones Python impulsadas por GPT by Jesús Conde
Easiest Way to Use GPT In Your Products | LangChain Basics Tutorial by Rachel Woods
BabyAGI + GPT-4 Langchain Agent with Internet Access by tylerwhatsgood
Learning LLM Agents. How does it actually work? LangChain, AutoGPT & OpenAI by Arnoldas Kemeklis
Get Started with LangChain in Node.js by Developers Digest
LangChain + OpenAI tutorial: Building a Q&A system w/ own text data by Samuel Chan
Langchain + Zapier Agent by Merk
Connecting the Internet with ChatGPT (LLMs) using Langchain And Answers Your Questions by Kamalraj M M
Build More Powerful LLM Applications for Business’s with LangChain (Beginners Guide) by No Code Blackbox
⛓️ LangFlow LLM Agent Demo for 🦜🔗LangChain by Cobus Greyling
⛓️ Chatbot Factory: Streamline Python Chatbot Creation with LLMs and Langchain by Finxter
⛓️ LangChain Tutorial - ChatGPT mit eigenen Daten by Coding Crashkurse
⛓️ Chat with a CSV | LangChain Agents Tutorial (Beginners) by GoDataProf
⛓️ Introdução ao Langchain - #Cortes - Live DataHackers by Prof. João Gabriel Lima
⛓️ LangChain: Level up ChatGPT !? | LangChain Tutorial Part 1 by Code Affinity
⛓️ KI schreibt krasses Youtube Skript 😲😳 | LangChain Tutorial Deutsch by SimpleKI
⛓️ Chat with Audio: Langchain, Chroma DB, OpenAI, and Assembly AI by AI Anytime
⛓️ QA over documents with Auto vector index selection with Langchain router chains by echohive | https://python.langchain.com/en/latest/additional_resources/youtube.html |
5b25cabdd8f4-3 | ⛓️ Build your own custom LLM application with Bubble.io & Langchain (No Code & Beginner friendly) by No Code Blackbox
⛓️ Simple App to Question Your Docs: Leveraging Streamlit, Hugging Face Spaces, LangChain, and Claude! by Chris Alexiuk
⛓️ LANGCHAIN AI- ConstitutionalChainAI + Databutton AI ASSISTANT Web App by Avra
⛓️ LANGCHAIN AI AUTONOMOUS AGENT WEB APP - 👶 BABY AGI 🤖 with EMAIL AUTOMATION using DATABUTTON by Avra
⛓️ The Future of Data Analysis: Using A.I. Models in Data Analysis (LangChain) by Absent Data
⛓️ Memory in LangChain | Deep dive (python) by Eden Marco
⛓️ 9 LangChain UseCases | Beginner’s Guide | 2023 by Data Science Basics
⛓️ Use Large Language Models in Jupyter Notebook | LangChain | Agents & Indexes by Abhinaw Tiwari
⛓️ How to Talk to Your Langchain Agent | 11 Labs + Whisper by VRSEN
⛓️ LangChain Deep Dive: 5 FUN AI App Ideas To Build Quickly and Easily by James NoCode
⛓️ BEST OPEN Alternative to OPENAI’s EMBEDDINGs for Retrieval QA: LangChain by Prompt Engineering
⛓️ LangChain 101: Models by Mckay Wrigley
⛓️ LangChain with JavaScript Tutorial #1 | Setup & Using LLMs by Leon van Zyl
⛓️ LangChain Overview & Tutorial for Beginners: Build Powerful AI Apps Quickly & Easily (ZERO CODE) by James NoCode
⛓️ LangChain In Action: Real-World Use Case With Step-by-Step Tutorial by Rabbitmetrics | https://python.langchain.com/en/latest/additional_resources/youtube.html |
5b25cabdd8f4-4 | ⛓️ Summarizing and Querying Multiple Papers with LangChain by Automata Learning Lab
⛓️ Using Langchain (and Replit) through Tana, ask Google/Wikipedia/Wolfram Alpha to fill out a table by Stian Håklev
⛓️ Langchain PDF App (GUI) | Create a ChatGPT For Your PDF in Python by Alejandro AO - Software & Ai
⛓️ Auto-GPT with LangChain 🔥 | Create Your Own Personal AI Assistant by Data Science Basics
⛓️ Create Your OWN Slack AI Assistant with Python & LangChain by Dave Ebbelaar
⛓️ How to Create LOCAL Chatbots with GPT4All and LangChain [Full Guide] by Liam Ottley
⛓️ Build a Multilingual PDF Search App with LangChain, Cohere and Bubble by Menlo Park Lab
⛓️ Building a LangChain Agent (code-free!) Using Bubble and Flowise by Menlo Park Lab
⛓️ Build a LangChain-based Semantic PDF Search App with No-Code Tools Bubble and Flowise by Menlo Park Lab
⛓️ LangChain Memory Tutorial | Building a ChatGPT Clone in Python by Alejandro AO - Software & Ai
⛓️ ChatGPT For Your DATA | Chat with Multiple Documents Using LangChain by Data Science Basics
⛓️ Llama Index: Chat with Documentation using URL Loader by Merk
⛓️ Using OpenAI, LangChain, and Gradio to Build Custom GenAI Applications by David Hundley
⛓ icon marks a new video [last update 2023-05-15]
previous
Model Comparison
Contents
⛓️Official LangChain YouTube channel⛓️
Introduction to LangChain with Harrison Chase, creator of LangChain
Videos (sorted by views)
By Harrison Chase | https://python.langchain.com/en/latest/additional_resources/youtube.html |
5b25cabdd8f4-5 | Videos (sorted by views)
By Harrison Chase
© Copyright 2023, Harrison Chase.
Last updated on Jun 02, 2023. | https://python.langchain.com/en/latest/additional_resources/youtube.html |
2d1194c9f16e-0 | .ipynb
.pdf
Model Comparison
Model Comparison#
Constructing your language model application will likely involve choosing among many different prompts, models, and even chains. When doing so, you will want to compare these options on different inputs in an easy, flexible, and intuitive way.
LangChain provides the concept of a ModelLaboratory to test out and try different models.
from langchain import LLMChain, OpenAI, Cohere, HuggingFaceHub, PromptTemplate
from langchain.model_laboratory import ModelLaboratory
llms = [
OpenAI(temperature=0),
Cohere(model="command-xlarge-20221108", max_tokens=20, temperature=0),
HuggingFaceHub(repo_id="google/flan-t5-xl", model_kwargs={"temperature":1})
]
model_lab = ModelLaboratory.from_llms(llms)
model_lab.compare("What color is a flamingo?")
Input:
What color is a flamingo?
OpenAI
Params: {'model': 'text-davinci-002', 'temperature': 0.0, 'max_tokens': 256, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0, 'n': 1, 'best_of': 1}
Flamingos are pink.
Cohere
Params: {'model': 'command-xlarge-20221108', 'max_tokens': 20, 'temperature': 0.0, 'k': 0, 'p': 1, 'frequency_penalty': 0, 'presence_penalty': 0}
Pink
HuggingFaceHub
Params: {'repo_id': 'google/flan-t5-xl', 'temperature': 1}
pink | https://python.langchain.com/en/latest/additional_resources/model_laboratory.html |
2d1194c9f16e-1 | pink
prompt = PromptTemplate(template="What is the capital of {state}?", input_variables=["state"])
model_lab_with_prompt = ModelLaboratory.from_llms(llms, prompt=prompt)
model_lab_with_prompt.compare("New York")
Input:
New York
OpenAI
Params: {'model': 'text-davinci-002', 'temperature': 0.0, 'max_tokens': 256, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0, 'n': 1, 'best_of': 1}
The capital of New York is Albany.
Cohere
Params: {'model': 'command-xlarge-20221108', 'max_tokens': 20, 'temperature': 0.0, 'k': 0, 'p': 1, 'frequency_penalty': 0, 'presence_penalty': 0}
The capital of New York is Albany.
HuggingFaceHub
Params: {'repo_id': 'google/flan-t5-xl', 'temperature': 1}
st john s
from langchain import SelfAskWithSearchChain, SerpAPIWrapper
open_ai_llm = OpenAI(temperature=0)
search = SerpAPIWrapper()
self_ask_with_search_openai = SelfAskWithSearchChain(llm=open_ai_llm, search_chain=search, verbose=True)
cohere_llm = Cohere(temperature=0, model="command-xlarge-20221108")
search = SerpAPIWrapper()
self_ask_with_search_cohere = SelfAskWithSearchChain(llm=cohere_llm, search_chain=search, verbose=True)
chains = [self_ask_with_search_openai, self_ask_with_search_cohere]
names = [str(open_ai_llm), str(cohere_llm)] | https://python.langchain.com/en/latest/additional_resources/model_laboratory.html |
2d1194c9f16e-2 | names = [str(open_ai_llm), str(cohere_llm)]
model_lab = ModelLaboratory(chains, names=names)
model_lab.compare("What is the hometown of the reigning men's U.S. Open champion?")
Input:
What is the hometown of the reigning men's U.S. Open champion?
OpenAI
Params: {'model': 'text-davinci-002', 'temperature': 0.0, 'max_tokens': 256, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0, 'n': 1, 'best_of': 1}
> Entering new chain...
What is the hometown of the reigning men's U.S. Open champion?
Are follow up questions needed here: Yes.
Follow up: Who is the reigning men's U.S. Open champion?
Intermediate answer: Carlos Alcaraz.
Follow up: Where is Carlos Alcaraz from?
Intermediate answer: El Palmar, Spain.
So the final answer is: El Palmar, Spain
> Finished chain.
So the final answer is: El Palmar, Spain
Cohere
Params: {'model': 'command-xlarge-20221108', 'max_tokens': 256, 'temperature': 0.0, 'k': 0, 'p': 1, 'frequency_penalty': 0, 'presence_penalty': 0}
> Entering new chain...
What is the hometown of the reigning men's U.S. Open champion?
Are follow up questions needed here: Yes.
Follow up: Who is the reigning men's U.S. Open champion?
Intermediate answer: Carlos Alcaraz.
So the final answer is:
Carlos Alcaraz
> Finished chain.
So the final answer is:
Carlos Alcaraz
previous
Tracing
next | https://python.langchain.com/en/latest/additional_resources/model_laboratory.html |
2d1194c9f16e-3 | So the final answer is:
Carlos Alcaraz
previous
Tracing
next
YouTube
By Harrison Chase
© Copyright 2023, Harrison Chase.
Last updated on Jun 02, 2023. | https://python.langchain.com/en/latest/additional_resources/model_laboratory.html |
e92df267171d-0 | .md
.pdf
Tracing
Contents
Tracing Walkthrough
Changing Sessions
Tracing#
By enabling tracing in your LangChain runs, you’ll be able to more effectively visualize, step through, and debug your chains and agents.
First, you should install tracing and set up your environment properly.
You can use either a locally hosted version of this (uses Docker) or a cloud hosted version (in closed alpha).
If you’re interested in using the hosted platform, please fill out the form here.
Locally Hosted Setup
Cloud Hosted Setup
Tracing Walkthrough#
When you first access the UI, you should see a page with your tracing sessions.
An initial one “default” should already be created for you.
A session is just a way to group traces together.
If you click on a session, it will take you to a page with no recorded traces that says “No Runs.”
You can create a new session with the new session form.
If we click on the default session, we can see that to start we have no traces stored.
If we now start running chains and agents with tracing enabled, we will see data show up here.
To do so, we can run this notebook as an example.
After running it, we will see an initial trace show up.
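Any traced run will produce such a trace; as a hedged sketch (the math tool, agent type, and OpenAI API key below are illustrative assumptions), a traced run can look like this:
import os
os.environ["LANGCHAIN_TRACING"] = "true"  # record runs to the current tracing session
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.llms import OpenAI
llm = OpenAI(temperature=0)
tools = load_tools(["llm-math"], llm=llm)
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)
agent.run("What is 2 raised to the 10th power?")  # this run shows up as a trace in the UI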
From here we can explore the trace at a high level by clicking on the arrow to show nested runs.
We can keep on clicking further and further down to explore deeper and deeper.
We can also click on the “Explore” button of the top level run to dive even deeper.
Here, we can see the inputs and outputs in full, as well as all the nested traces.
We can keep on exploring each of these nested traces in more detail.
For example, here is the lowest level trace with the exact inputs/outputs to the LLM.
Changing Sessions# | https://python.langchain.com/en/latest/additional_resources/tracing.html |
e92df267171d-1 | Changing Sessions#
To initially record traces to a session other than "default", you can set the LANGCHAIN_SESSION environment variable to the name of the session you want to record to:
import os
os.environ["LANGCHAIN_TRACING"] = "true"
os.environ["LANGCHAIN_SESSION"] = "my_session" # Make sure this session actually exists. You can create a new session in the UI.
To switch sessions mid-script or mid-notebook, do NOT set the LANGCHAIN_SESSION environment variable. Instead: langchain.set_tracing_callback_manager(session_name="my_session")
previous
Deployments
next
Model Comparison
Contents
Tracing Walkthrough
Changing Sessions
By Harrison Chase
© Copyright 2023, Harrison Chase.
Last updated on Jun 02, 2023. | https://python.langchain.com/en/latest/additional_resources/tracing.html |
0276e2e8fde9-0 | .rst
.pdf
Indexes
Contents
Go Deeper
Indexes#
Note
Conceptual Guide
Indexes refer to ways to structure documents so that LLMs can best interact with them.
This module contains utility functions for working with documents, different types of indexes, and then examples for using those indexes in chains.
The most common way that indexes are used in chains is in a “retrieval” step.
This step refers to taking a user’s query and returning the most relevant documents.
We draw this distinction because (1) an index can be used for other things besides retrieval, and (2) retrieval can use other logic besides an index to find relevant documents.
We therefore have a concept of a “Retriever” interface - this is the interface that most chains work with.
Most of the time when we talk about indexes and retrieval we are talking about indexing and retrieving unstructured data (like text documents).
For interacting with structured data (SQL tables, etc) or APIs, please see the corresponding use case sections for links to relevant functionality.
The primary index and retrieval types supported by LangChain are currently centered around vector databases, and therefore
much of the functionality covered here dives deep on those topics.
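As a quick sketch of the Retriever interface described above (assuming the faiss package is installed and an OpenAI API key is set; the sample texts are illustrative), a vector store can be exposed as a retriever like this:
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
# Build a tiny in-memory index, then expose it through the Retriever interface.
texts = ["LangChain provides a Retriever interface.", "Flamingos are pink."]
vectorstore = FAISS.from_texts(texts, OpenAIEmbeddings())
retriever = vectorstore.as_retriever()
relevant_docs = retriever.get_relevant_documents("What interface do most chains work with?")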
For an overview of everything related to this, please see the below notebook for getting started:
Getting Started
We then provide a deep dive on the four main components.
Document Loaders
How to load documents from a variety of sources.
Text Splitters
An overview of the abstractions and implementations around splitting text.
VectorStores
An overview of VectorStores and the many integrations LangChain provides.
Retrievers
An overview of Retrievers and the implementations LangChain provides.
Go Deeper#
Document Loaders
Text Splitters
Vectorstores
Retrievers
previous
Zep Memory
next
Getting Started
Contents
Go Deeper | https://python.langchain.com/en/latest/modules/indexes.html |
0276e2e8fde9-1 | previous
Zep Memory
next
Getting Started
Contents
Go Deeper
By Harrison Chase
© Copyright 2023, Harrison Chase.
Last updated on Jun 02, 2023. | https://python.langchain.com/en/latest/modules/indexes.html |
7974129db4c1-0 | .rst
.pdf
Models
Contents
Getting Started
Go Deeper
Models#
Note
Conceptual Guide
This section of the documentation deals with different types of models that are used in LangChain.
On this page we will go over the model types at a high level,
but we have individual pages for each model type.
The pages contain more detailed “how-to” guides for working with that model,
as well as a list of different model providers.
LLMs
Large Language Models (LLMs) are the first type of models we cover.
These models take a text string as input, and return a text string as output.
Chat Models
Chat Models are the second type of models we cover.
These models are usually backed by a language model, but their APIs are more structured.
Specifically, these models take a list of Chat Messages as input, and return a Chat Message.
Text Embedding Models
The third type of models we cover are text embedding models.
These models take text as input and return a list of floats.
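The following sketch shows the three interfaces side by side; the OpenAI-backed classes are just one possible provider, and an OpenAI API key is assumed:
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema import HumanMessage
llm = OpenAI()                           # text string in -> text string out
completion = llm("Say hello in French.")
chat = ChatOpenAI()                      # list of chat messages in -> chat message out
reply = chat([HumanMessage(content="Say hello in French.")])
embeddings = OpenAIEmbeddings()          # text in -> list of floats out
vector = embeddings.embed_query("hello")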
Getting Started#
Getting Started
Go Deeper#
LLMs
Chat Models
Text Embedding Models
previous
Tutorials
next
Getting Started
Contents
Getting Started
Go Deeper
By Harrison Chase
© Copyright 2023, Harrison Chase.
Last updated on Jun 02, 2023. | https://python.langchain.com/en/latest/modules/models.html |
109afbd228ed-0 | .rst
.pdf
Agents
Contents
Action Agents
Plan-and-Execute Agents
Agents#
Note
Conceptual Guide
Some applications will require not just a predetermined chain of calls to LLMs/other tools,
but potentially an unknown chain that depends on the user’s input.
In these types of chains, there is an “agent” which has access to a suite of tools.
Depending on the user input, the agent can then decide which, if any, of these tools to call.
At the moment, there are two main types of agents:
“Action Agents”: these agents decide an action to take and take that action one step at a time
“Plan-and-Execute Agents”: these agents first decide a plan of actions to take, and then execute those actions one at a time.
When should you use each one? Action Agents are more conventional, and good for small tasks.
For more complex or long running tasks, the initial planning step helps to maintain long term objectives and focus. However, that comes at the expense of generally more calls and higher latency.
These two agents are also not mutually exclusive - in fact, it is often best to have an Action Agent be in charge of the execution for the Plan and Execute agent.
Action Agents#
High-level pseudocode of an Action Agent looks something like this:
Some user input is received
The agent decides which tool - if any - to use, and what the input to that tool should be
That tool is then called with that tool input, and an observation is recorded (this is just the output of calling that tool with that tool input)
That history of tool, tool input, and observation is passed back into the agent, and it decides what step to take next
This is repeated until the agent decides it no longer needs to use a tool, and then it responds directly to the user.
The different abstractions involved in agents are as follows: | https://python.langchain.com/en/latest/modules/agents.html |
109afbd228ed-1 | The different abstractions involved in agents are as follows:
Agent: this is where the logic of the application lives. Agents expose an interface that takes in user input along with a list of previous steps the agent has taken, and returns either an AgentAction or AgentFinish
AgentAction corresponds to the tool to use and the input to that tool
AgentFinish means the agent is done, and has information around what to return to the user
Tools: these are the actions an agent can take. What tools you give an agent depends heavily on what you want the agent to do.
Toolkits: these are groups of tools designed for a specific use case. For example, in order for an agent to interact with a SQL database in the best way it may need access to one tool to execute queries and another tool to inspect tables.
Agent Executor: this wraps an agent and a list of tools. This is responsible for the loop of running the agent iteratively until the stopping criteria are met.
The most important abstraction of the four above to understand is that of the agent.
Although an agent can be defined in whatever way one chooses, the typical way to construct an agent is with:
PromptTemplate: this is responsible for taking the user input and previous steps and constructing a prompt to send to the language model
Language Model: this takes the prompt constructed by the PromptTemplate and returns some output
Output Parser: this takes the output of the Language Model and parses it into an AgentAction or AgentFinish object.
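To make the loop above concrete, here is a minimal, hedged sketch of constructing and running an Action Agent with a prebuilt agent type; the specific tool and agent type are illustrative choices, and an OpenAI API key is assumed.
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.llms import OpenAI
llm = OpenAI(temperature=0)
tools = load_tools(["llm-math"], llm=llm)
# initialize_agent returns an Agent Executor, which runs the loop described above.
agent_executor = initialize_agent(
    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent_executor.run("What is 7 raised to the 0.23 power?")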
In this section of documentation, we first start with a Getting Started notebook to cover how to use all things related to agents in an end-to-end manner.
We then split the documentation into the following sections:
Tools
In this section we cover the different types of tools LangChain supports natively.
We then cover how to add your own tools.
Agents
In this section we cover the different types of agents LangChain supports natively. | https://python.langchain.com/en/latest/modules/agents.html |
109afbd228ed-2 | Agents
In this section we cover the different types of agents LangChain supports natively.
We then cover how to modify and create your own agents.
Toolkits
In this section we go over the various toolkits that LangChain supports out of the box,
and how to create an agent from them.
Agent Executor
In this section we go over the Agent Executor class, which is responsible for calling
the agent and tools in a loop. We go over different ways to customize this, and options you
can use for more control.
Go Deeper
Tools
Agents
Toolkits
Agent Executors
Plan-and-Execute Agents#
High-level pseudocode of a Plan-and-Execute Agent looks something like this:
Some user input is received
The planner lists out the steps to take
The executor goes through the list of steps, executing them
The most typical implementation is to have the planner be a language model,
and the executor be an action agent.
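A minimal sketch of that pattern follows; it assumes the experimental plan-and-execute helpers are importable from langchain.experimental.plan_and_execute (the module layout may differ across versions), and the math tool is an illustrative choice.
from langchain.agents import load_tools
from langchain.chat_models import ChatOpenAI
from langchain.experimental.plan_and_execute import (
    PlanAndExecute,
    load_agent_executor,
    load_chat_planner,
)
from langchain.llms import OpenAI
model = ChatOpenAI(temperature=0)
tools = load_tools(["llm-math"], llm=OpenAI(temperature=0))
planner = load_chat_planner(model)            # the planner lists out the steps
executor = load_agent_executor(model, tools)  # the executor is an action agent
agent = PlanAndExecute(planner=planner, executor=executor, verbose=True)
agent.run("What is 3 raised to the fifth power?")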
Go Deeper
Plan and Execute
Imports
Tools
Planner, Executor, and Agent
Run Example
previous
Chains
next
Getting Started
Contents
Action Agents
Plan-and-Execute Agents
By Harrison Chase
© Copyright 2023, Harrison Chase.
Last updated on Jun 02, 2023. | https://python.langchain.com/en/latest/modules/agents.html |
293f0adc4476-0 | .rst
.pdf
Memory
Memory#
Note
Conceptual Guide
By default, Chains and Agents are stateless,
meaning that they treat each incoming query independently (as are the underlying LLMs and chat models).
In some applications (chatbots being a GREAT example) it is highly important
to remember previous interactions, at both a short-term and a long-term level.
The concept of “Memory” exists to do exactly that.
LangChain provides memory components in two forms.
First, LangChain provides helper utilities for managing and manipulating previous chat messages.
These are designed to be modular and useful regardless of how they are used.
Secondly, LangChain provides easy ways to incorporate these utilities into chains.
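As a brief, hedged sketch of both forms (assuming an OpenAI API key is set), ConversationBufferMemory stores prior messages and can be plugged directly into a chain:
from langchain.chains import ConversationChain
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory
# The utility: a buffer that keeps previous chat messages.
memory = ConversationBufferMemory()
# Incorporating it into a chain so each call sees the earlier interactions.
conversation = ConversationChain(llm=OpenAI(temperature=0), memory=memory, verbose=True)
conversation.predict(input="Hi, my name is Harrison.")
conversation.predict(input="What is my name?")  # answered from the stored history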
The following sections of documentation are provided:
Getting Started: An overview of how to get started with different types of memory.
How-To Guides: A collection of how-to guides. These highlight different types of memory, as well as how to use memory in chains.
Memory
Getting Started
How-To Guides
previous
Structured Output Parser
next
Getting Started
By Harrison Chase
© Copyright 2023, Harrison Chase.
Last updated on Jun 02, 2023. | https://python.langchain.com/en/latest/modules/memory.html |
17f0db6a126c-0 | .rst
.pdf
Chains
Chains#
Note
Conceptual Guide
Using an LLM in isolation is fine for some simple applications,
but many more complex ones require chaining LLMs - either with each other or with other experts.
LangChain provides a standard interface for Chains, as well as some common implementations of chains for ease of use.
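For instance, here is a minimal sketch of chaining two LLM calls with SimpleSequentialChain; the prompts are illustrative and an OpenAI API key is assumed.
from langchain.chains import LLMChain, SimpleSequentialChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
llm = OpenAI(temperature=0)
title_chain = LLMChain(llm=llm, prompt=PromptTemplate.from_template("Write a book title about {topic}."))
blurb_chain = LLMChain(llm=llm, prompt=PromptTemplate.from_template("Write a one-sentence blurb for the book: {title}"))
# The output of the first chain is fed in as the input of the second.
overall_chain = SimpleSequentialChain(chains=[title_chain, blurb_chain], verbose=True)
overall_chain.run("flamingos")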
The following sections of documentation are provided:
Getting Started: A getting started guide for chains, to get you up and running quickly.
How-To Guides: A collection of how-to guides. These highlight how to use various types of chains.
Reference: API reference documentation for all Chain classes.
previous
Zep Memory
next
Getting Started
By Harrison Chase
© Copyright 2023, Harrison Chase.
Last updated on Jun 02, 2023. | https://python.langchain.com/en/latest/modules/chains.html |
6f6ebe244706-0 | .rst
.pdf
Prompts
Contents
Getting Started
Go Deeper
Prompts#
Note
Conceptual Guide
The new way of programming models is through prompts.
A “prompt” refers to the input to the model.
This input is rarely hard coded, but rather is often constructed from multiple components.
A PromptTemplate is responsible for the construction of this input.
LangChain provides several classes and functions to make constructing and working with prompts easy.
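For example (a minimal sketch; the template text is illustrative):
from langchain.prompts import PromptTemplate
# A PromptTemplate assembles the model input from multiple components.
prompt = PromptTemplate(
    template="Answer the question as {persona}.\n\nQuestion: {question}",
    input_variables=["persona", "question"],
)
prompt.format(persona="a pirate", question="What color is a flamingo?")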
This section of documentation is split into four sections:
LLM Prompt Templates
How to use PromptTemplates to prompt Language Models.
Chat Prompt Templates
How to use PromptTemplates to prompt Chat Models.
Example Selectors
Oftentimes it is useful to include examples in prompts.
These examples can be hardcoded, but it is often more powerful if they are dynamically selected.
This section goes over example selection.
Output Parsers
Language models (and Chat Models) output text.
But many times you may want to get more structured information than just text back.
This is where output parsers come in.
Output Parsers are responsible for (1) instructing the model how output should be formatted, and
(2) parsing output into the desired format (including retrying if necessary).
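A small, hedged sketch with the built-in comma-separated-list parser shows both responsibilities (the prompt text and sample output are illustrative):
from langchain.output_parsers import CommaSeparatedListOutputParser
from langchain.prompts import PromptTemplate
parser = CommaSeparatedListOutputParser()
# (1) The parser supplies formatting instructions to embed in the prompt.
prompt = PromptTemplate(
    template="List three {subject}.\n{format_instructions}",
    input_variables=["subject"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)
# (2) It then parses the raw model output back into structured data.
parser.parse("red, orange, pink")  # -> ['red', 'orange', 'pink']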
Getting Started#
Getting Started
Go Deeper#
Prompt Templates
Chat Prompt Template
Example Selectors
Output Parsers
previous
TensorflowHub
next
Getting Started
Contents
Getting Started
Go Deeper
By Harrison Chase
© Copyright 2023, Harrison Chase.
Last updated on Jun 02, 2023. | https://python.langchain.com/en/latest/modules/prompts.html |
b45b62629dd9-0 | .ipynb
.pdf
Callbacks
Contents
Callbacks
How to use callbacks
When do you want to use each of these?
Using an existing handler
Creating a custom handler
Async Callbacks
Using multiple handlers, passing in handlers
Tracing and Token Counting
Tracing
Token Counting
Callbacks#
LangChain provides a callbacks system that allows you to hook into the various stages of your LLM application. This is useful for logging, monitoring, streaming, and other tasks.
You can subscribe to these events by using the callbacks argument available throughout the API. This argument is a list of handler objects, which are expected to implement one or more of the methods described in more detail below. There are two main callback mechanisms:
Constructor callbacks will be used for all calls made on that object, and will be scoped to that object only, i.e. if you pass a handler to the LLMChain constructor, it will not be used by the model attached to that chain.
Request callbacks will be used for that specific request only, and all sub-requests that it contains (eg. a call to an LLMChain triggers a call to a Model, which uses the same handler passed through). These are explicitly passed through.
Advanced: When you create a custom chain you can easily set it up to use the same callback system as all the built-in chains.
_call, _generate, _run, and equivalent async methods on Chains / LLMs / Chat Models / Agents / Tools now receive a 2nd argument called run_manager which is bound to that run, and contains the logging methods that can be used by that object (i.e. on_llm_new_token). This is useful when constructing a custom chain. See this guide for more information on how to create custom chains and use callbacks inside them. | https://python.langchain.com/en/latest/modules/callbacks/getting_started.html |
b45b62629dd9-1 | CallbackHandlers are objects that implement the CallbackHandler interface, which has a method for each event that can be subscribed to. The CallbackManager will call the appropriate method on each handler when the event is triggered.
class BaseCallbackHandler:
"""Base callback handler that can be used to handle callbacks from langchain."""
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> Any:
"""Run when LLM starts running."""
def on_llm_new_token(self, token: str, **kwargs: Any) -> Any:
"""Run on new LLM token. Only available when streaming is enabled."""
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> Any:
"""Run when LLM ends running."""
def on_llm_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> Any:
"""Run when LLM errors."""
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> Any:
"""Run when chain starts running."""
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> Any:
"""Run when chain ends running."""
def on_chain_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> Any:
"""Run when chain errors."""
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> Any:
"""Run when tool starts running."""
def on_tool_end(self, output: str, **kwargs: Any) -> Any: | https://python.langchain.com/en/latest/modules/callbacks/getting_started.html |
b45b62629dd9-2 | def on_tool_end(self, output: str, **kwargs: Any) -> Any:
"""Run when tool ends running."""
def on_tool_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> Any:
"""Run when tool errors."""
def on_text(self, text: str, **kwargs: Any) -> Any:
"""Run on arbitrary text."""
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run on agent end."""
How to use callbacks#
The callbacks argument is available on most objects throughout the API (Chains, Models, Tools, Agents, etc.) in two different places:
Constructor callbacks: defined in the constructor, eg. LLMChain(callbacks=[handler]), which will be used for all calls made on that object, and will be scoped to that object only, eg. if you pass a handler to the LLMChain constructor, it will not be used by the Model attached to that chain.
Request callbacks: defined in the call()/run()/apply() methods used for issuing a request, eg. chain.call(inputs, callbacks=[handler]), which will be used for that specific request only, and all sub-requests that it contains (eg. a call to an LLMChain triggers a call to a Model, which uses the same handler passed in the call() method). | https://python.langchain.com/en/latest/modules/callbacks/getting_started.html |
b45b62629dd9-3 | The verbose argument is available on most objects throughout the API (Chains, Models, Tools, Agents, etc.) as a constructor argument, eg. LLMChain(verbose=True), and it is equivalent to passing a ConsoleCallbackHandler to the callbacks argument of that object and all child objects. This is useful for debugging, as it will log all events to the console.
When do you want to use each of these?#
Constructor callbacks are most useful for use cases such as logging, monitoring, etc., which are not specific to a single request, but rather to the entire chain. For example, if you want to log all the requests made to an LLMChain, you would pass a handler to the constructor.
Request callbacks are most useful for use cases such as streaming, where you want to stream the output of a single request to a specific websocket connection, or other similar use cases. For example, if you want to stream the output of a single request to a websocket, you would pass a handler to the call() method
Using an existing handler#
LangChain provides a few built-in handlers that you can use to get started. These are available in the langchain/callbacks module. The most basic handler is the StdOutCallbackHandler, which simply logs all events to stdout. In the future we will add more default handlers to the library.
Note: when the verbose flag on the object is set to true, the StdOutCallbackHandler will be invoked even without being explicitly passed in.
from langchain.callbacks import StdOutCallbackHandler
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
handler = StdOutCallbackHandler()
llm = OpenAI()
prompt = PromptTemplate.from_template("1 + {number} = ")
# First, let's explicitly set the StdOutCallbackHandler in `callbacks` | https://python.langchain.com/en/latest/modules/callbacks/getting_started.html |
b45b62629dd9-4 | # First, let's explicitly set the StdOutCallbackHandler in `callbacks`
chain = LLMChain(llm=llm, prompt=prompt, callbacks=[handler])
chain.run(number=2)
# Then, let's use the `verbose` flag to achieve the same result
chain = LLMChain(llm=llm, prompt=prompt, verbose=True)
chain.run(number=2)
# Finally, let's use the request `callbacks` to achieve the same result
chain = LLMChain(llm=llm, prompt=prompt)
chain.run(number=2, callbacks=[handler])
> Entering new LLMChain chain...
Prompt after formatting:
1 + 2 =
> Finished chain.
> Entering new LLMChain chain...
Prompt after formatting:
1 + 2 =
> Finished chain.
> Entering new LLMChain chain...
Prompt after formatting:
1 + 2 =
> Finished chain.
'\n\n3'
Creating a custom handler#
You can create a custom handler to set on the object as well. In the example below, we’ll implement streaming with a custom handler.
from langchain.callbacks.base import BaseCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
class MyCustomHandler(BaseCallbackHandler):
def on_llm_new_token(self, token: str, **kwargs) -> None:
print(f"My custom handler, token: {token}")
# To enable streaming, we pass in `streaming=True` to the ChatModel constructor
# Additionally, we pass in a list with our custom handler
chat = ChatOpenAI(max_tokens=25, streaming=True, callbacks=[MyCustomHandler()])
chat([HumanMessage(content="Tell me a joke")])
My custom handler, token: | https://python.langchain.com/en/latest/modules/callbacks/getting_started.html |
b45b62629dd9-5 | My custom handler, token:
My custom handler, token: Why
My custom handler, token: did
My custom handler, token: the
My custom handler, token: tomato
My custom handler, token: turn
My custom handler, token: red
My custom handler, token: ?
My custom handler, token: Because
My custom handler, token: it
My custom handler, token: saw
My custom handler, token: the
My custom handler, token: salad
My custom handler, token: dressing
My custom handler, token: !
My custom handler, token:
AIMessage(content='Why did the tomato turn red? Because it saw the salad dressing!', additional_kwargs={})
Async Callbacks#
If you are planning to use the async API, it is recommended to use AsyncCallbackHandler to avoid blocking the runloop.
Advanced: if you use a sync CallbackHandler while using an async method to run your LLM/chain/tool/agent, it will still work. However, under the hood, it will be called with run_in_executor, which can cause issues if your CallbackHandler is not thread-safe.
import asyncio
from typing import Any, Dict, List
from langchain.schema import LLMResult
from langchain.callbacks.base import AsyncCallbackHandler
class MyCustomSyncHandler(BaseCallbackHandler):
def on_llm_new_token(self, token: str, **kwargs) -> None:
print(f"Sync handler being called in a `thread_pool_executor`: token: {token}")
class MyCustomAsyncHandler(AsyncCallbackHandler):
"""Async callback handler that can be used to handle callbacks from langchain."""
async def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None: | https://python.langchain.com/en/latest/modules/callbacks/getting_started.html |
b45b62629dd9-6 | ) -> None:
"""Run when chain starts running."""
print("zzzz....")
await asyncio.sleep(0.3)
class_name = serialized["name"]
print("Hi! I just woke up. Your llm is starting")
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when chain ends running."""
print("zzzz....")
await asyncio.sleep(0.3)
print("Hi! I just woke up. Your llm is ending")
# To enable streaming, we pass in `streaming=True` to the ChatModel constructor
# Additionally, we pass in a list with our custom handler
chat = ChatOpenAI(max_tokens=25, streaming=True, callbacks=[MyCustomSyncHandler(), MyCustomAsyncHandler()])
await chat.agenerate([[HumanMessage(content="Tell me a joke")]])
zzzz....
Hi! I just woke up. Your llm is starting
Sync handler being called in a `thread_pool_executor`: token:
Sync handler being called in a `thread_pool_executor`: token: Why
Sync handler being called in a `thread_pool_executor`: token: don
Sync handler being called in a `thread_pool_executor`: token: 't
Sync handler being called in a `thread_pool_executor`: token: scientists
Sync handler being called in a `thread_pool_executor`: token: trust
Sync handler being called in a `thread_pool_executor`: token: atoms
Sync handler being called in a `thread_pool_executor`: token: ?
Sync handler being called in a `thread_pool_executor`: token: Because
Sync handler being called in a `thread_pool_executor`: token: they
Sync handler being called in a `thread_pool_executor`: token: make | https://python.langchain.com/en/latest/modules/callbacks/getting_started.html |
b45b62629dd9-7 | Sync handler being called in a `thread_pool_executor`: token: make
Sync handler being called in a `thread_pool_executor`: token: up
Sync handler being called in a `thread_pool_executor`: token: everything
Sync handler being called in a `thread_pool_executor`: token: !
Sync handler being called in a `thread_pool_executor`: token:
zzzz....
Hi! I just woke up. Your llm is ending
LLMResult(generations=[[ChatGeneration(text="Why don't scientists trust atoms?\n\nBecause they make up everything!", generation_info=None, message=AIMessage(content="Why don't scientists trust atoms?\n\nBecause they make up everything!", additional_kwargs={}))]], llm_output={'token_usage': {}, 'model_name': 'gpt-3.5-turbo'})
Using multiple handlers, passing in handlers#
In the previous examples, we passed in callback handlers upon creation of an object by using callbacks=. In this case, the callbacks will be scoped to that particular object.
However, in many cases, it is advantageous to pass in handlers instead when running the object. When we pass through CallbackHandlers using the callbacks keyword arg when executing a run, those callbacks will be issued by all nested objects involved in the execution. For example, when a handler is passed through to an Agent, it will be used for all callbacks related to the agent and all the objects involved in the agent’s execution, in this case, the Tools, LLMChain, and LLM.
This prevents us from having to manually attach the handlers to each individual nested object.
from typing import Dict, Union, Any, List
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.callbacks import tracing_enabled
from langchain.llms import OpenAI | https://python.langchain.com/en/latest/modules/callbacks/getting_started.html |
b45b62629dd9-8 | from langchain.callbacks import tracing_enabled
from langchain.llms import OpenAI
# First, define custom callback handler implementations
class MyCustomHandlerOne(BaseCallbackHandler):
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> Any:
print(f"on_llm_start {serialized['name']}")
def on_llm_new_token(self, token: str, **kwargs: Any) -> Any:
print(f"on_new_token {token}")
def on_llm_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> Any:
"""Run when LLM errors."""
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> Any:
print(f"on_chain_start {serialized['name']}")
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> Any:
print(f"on_tool_start {serialized['name']}")
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
print(f"on_agent_action {action}")
class MyCustomHandlerTwo(BaseCallbackHandler):
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> Any:
print(f"on_llm_start (I'm the second handler!!) {serialized['name']}")
# Instantiate the handlers
handler1 = MyCustomHandlerOne()
handler2 = MyCustomHandlerTwo()
# Setup the agent. Only the `llm` will issue callbacks for handler2 | https://python.langchain.com/en/latest/modules/callbacks/getting_started.html |