diff --git a/langchain_g4f/G4FLLM.py b/langchain_g4f/G4FLLM.py
index eb75542..03ed2e0 100644
--- a/langchain_g4f/G4FLLM.py
+++ b/langchain_g4f/G4FLLM.py
@@ -1,7 +1,8 @@
 from types import ModuleType
-from typing import Optional, List, Any, Mapping, Union
+from typing import Any, List, Mapping, Optional, Union
 
-import g4f
+from g4f import ChatCompletion
+from g4f.models import Model
 from langchain.callbacks.manager import CallbackManagerForLLMRun
 from langchain.llms.base import LLM
 from langchain.llms.utils import enforce_stop_tokens
@@ -9,7 +10,7 @@ from langchain.llms.utils import enforce_stop_tokens
 
 class G4FLLM(LLM):
     # Model.model or str
-    model: Union[type, str]
+    model: Union[Model, str]
     # Provider.Provider
     provider: Optional[ModuleType] = None
     auth: Optional[Union[str, bool]] = None
@@ -34,7 +35,7 @@ class G4FLLM(LLM):
         if self.auth is not None:
             create_kwargs["auth"] = self.auth
 
-        text = g4f.ChatCompletion.create(
+        text = ChatCompletion.create(
             messages=[{"role": "user", "content": prompt}],
             **create_kwargs,
         )
diff --git a/sample/llm_sample.py b/sample/llm_sample.py
index 804d7bb..deaca80 100644
--- a/sample/llm_sample.py
+++ b/sample/llm_sample.py
@@ -1,4 +1,4 @@
-from g4f import Provider, Model
+from g4f import Provider, models
 from langchain.llms.base import LLM
 
 from langchain_g4f import G4FLLM
@@ -6,7 +6,7 @@ from langchain_g4f import G4FLLM
 
 def main():
     llm: LLM = G4FLLM(
-        model=Model.gpt_35_turbo,
+        model=models.gpt_35_turbo,
         provider=Provider.Aichat,
     )
 
diff --git a/sample/prompt_template_sample.py b/sample/prompt_template_sample.py
index b45b087..54fb11d 100644
--- a/sample/prompt_template_sample.py
+++ b/sample/prompt_template_sample.py
@@ -1,6 +1,6 @@
-from g4f import Provider, Model
-from langchain.llms.base import LLM
+from g4f import Provider, models
 from langchain import PromptTemplate
+from langchain.llms.base import LLM
 
 from langchain_g4f import G4FLLM
 
@@ -10,7 +10,7 @@ def main():
     prompt_template = PromptTemplate(template=template, input_variables=["fruit"])
 
     llm: LLM = G4FLLM(
-        model=Model.gpt_35_turbo,
+        model=models.gpt_35_turbo,
         provider=Provider.Aichat,
     )
 
diff --git a/sample/sequential_chain_sample.py b/sample/sequential_chain_sample.py
index a28ee37..8a38e2f 100644
--- a/sample/sequential_chain_sample.py
+++ b/sample/sequential_chain_sample.py
@@ -1,14 +1,14 @@
-from g4f import Provider, Model
-from langchain.llms.base import LLM
+from g4f import Provider, models
 from langchain import PromptTemplate
 from langchain.chains import LLMChain, SimpleSequentialChain
+from langchain.llms.base import LLM
 from langchain_g4f import G4FLLM
 
 
 def main():
     llm: LLM = G4FLLM(
-        model=Model.gpt_35_turbo,
+        model=models.gpt_35_turbo,
         provider=Provider.DeepAi,
     )
 
diff --git a/sample/simple_chain_sample.py b/sample/simple_chain_sample.py
index d2d2299..63514e4 100644
--- a/sample/simple_chain_sample.py
+++ b/sample/simple_chain_sample.py
@@ -1,14 +1,14 @@
-from g4f import Provider, Model
-from langchain.llms.base import LLM
+from g4f import Provider, models
 from langchain import PromptTemplate
 from langchain.chains import LLMChain
+from langchain.llms.base import LLM
 from langchain_g4f import G4FLLM
 
 
 def main():
     llm: LLM = G4FLLM(
-        model=Model.gpt_35_turbo,
+        model=models.gpt_35_turbo,
         provider=Provider.Aichat,
     )
     prompt_template = PromptTemplate(