def validate_provider_credentials(self, credentials: dict) -> None:
    """
    Validate provider credentials.

    You may reuse any ``validate_credentials`` method of a model type, or
    implement the validation yourself (e.g. by calling a "get model list" API).
    Raise an exception if validation fails.

    :param credentials: provider credentials, form defined in `provider_credential_schema`
    """
def validate_credentials(self, model: str, credentials: dict) -> None:
    """
    Validate model credentials.

    :param model: model name
    :param credentials: model credentials
    :return: None
    """
@property
def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
    """
    Map model invoke errors to unified error types.

    The key is the unified error type raised to the caller; the value lists
    the model-specific error types that must be converted to that key.

    :return: invoke error mapping
    """
def _invoke(self, model: str, credentials: dict,
            prompt_messages: list[PromptMessage], model_parameters: dict,
            tools: Optional[list[PromptMessageTool]] = None,
            stop: Optional[list[str]] = None, stream: bool = True,
            user: Optional[str] = None) -> Union[LLMResult, Generator]:
    """
    Invoke large language model.

    :param model: model name
    :param credentials: model credentials
    :param prompt_messages: prompt messages
    :param model_parameters: model parameters
    :param tools: tools for tool calling
    :param stop: stop words
    :param stream: is stream response
    :param user: unique user id
    :return: full response, or stream response chunk generator
    """
def get_num_tokens(self, model: str, credentials: dict,
                   prompt_messages: list[PromptMessage],
                   tools: Optional[list[PromptMessageTool]] = None) -> int:
    """
    Get the number of tokens for the given prompt messages.

    :param model: model name
    :param credentials: model credentials
    :param prompt_messages: prompt messages
    :param tools: tools for tool calling
    :return: number of tokens
    """
def get_customizable_model_schema(self, model: str,
                                  credentials: dict) -> Optional[AIModelEntity]:
    """
    Get the customizable model schema.

    :param model: model name
    :param credentials: model credentials
    :return: model schema
    """
def _invoke(self, model: str, credentials: dict, texts: list[str],
            user: Optional[str] = None) -> TextEmbeddingResult:
    """
    Invoke text embedding model.

    :param model: model name
    :param credentials: model credentials
    :param texts: texts to embed
    :param user: unique user id
    :return: embeddings result
    """
def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int:
    """
    Get the number of tokens for the given texts.

    :param model: model name
    :param credentials: model credentials
    :param texts: texts to embed
    :return: number of tokens
    """
def _invoke(self, model: str, credentials: dict, query: str, docs: list[str],
            score_threshold: Optional[float] = None, top_n: Optional[int] = None,
            user: Optional[str] = None) -> RerankResult:
    """
    Invoke rerank model.

    :param model: model name
    :param credentials: model credentials
    :param query: search query
    :param docs: docs for reranking
    :param score_threshold: score threshold
    :param top_n: top n
    :param user: unique user id
    :return: rerank result
    """
def _invoke(self, model: str, credentials: dict, file: IO[bytes],
            user: Optional[str] = None) -> str:
    """
    Invoke speech-to-text model.

    :param model: model name
    :param credentials: model credentials
    :param file: audio file
    :param user: unique user id
    :return: text for the given audio file
    """
def _invoke(self, model: str, credentials: dict, content_text: str,
            streaming: bool, user: Optional[str] = None):
    """
    Invoke text-to-speech model.

    :param model: model name
    :param credentials: model credentials
    :param content_text: text content to be converted to speech
    :param streaming: whether the output is streamed
    :param user: unique user id
    :return: audio for the given text
    """
def _invoke(self, model: str, credentials: dict, text: str,
            user: Optional[str] = None) -> bool:
    """
    Invoke moderation model.

    :param model: model name
    :param credentials: model credentials
    :param text: text to moderate
    :param user: unique user id
    :return: False if the text is safe, True otherwise
    """
class TextPromptMessageContent(PromptMessageContent):
    """
    Model class for text prompt message content.
    """
    type: PromptMessageContentType = PromptMessageContentType.TEXT
若传入图文,其中文字需要构造此实体作为 content 列表中的一部分。
ImagePromptMessageContent
class ImagePromptMessageContent(PromptMessageContent):
    """
    Model class for image prompt message content.
    """
    class DETAIL(Enum):
        LOW = 'low'
        HIGH = 'high'

    type: PromptMessageContentType = PromptMessageContentType.IMAGE
    detail: DETAIL = DETAIL.LOW  # image detail/resolution level
若传入图文,其中图片需要构造此实体作为 content 列表中的一部分。
data 可以为 url 或者图片 base64 编码后的字符串。
PromptMessage
所有 Role 消息体的基类,仅作为参数声明用,不可初始化。
class PromptMessage(ABC, BaseModel):
    """
    Model class for prompt message.

    Base class for all role message bodies; used for parameter declaration
    only and must not be instantiated directly.
    """
    role: PromptMessageRole  # message role
    # Either a plain string or a content list; the content list serves
    # multimodal input — see PromptMessageContent for details.
    content: Optional[str | list[PromptMessageContent]] = None
    name: Optional[str] = None  # optional name
UserPromptMessage
UserMessage 消息体,代表用户消息。
class UserPromptMessage(PromptMessage):
    """
    Model class for user prompt message (represents a user message).
    """
    role: PromptMessageRole = PromptMessageRole.USER
AssistantPromptMessage
代表模型返回消息,通常用于 few-shots 或聊天历史传入。
class AssistantPromptMessage(PromptMessage):
    """
    Model class for assistant prompt message (a model response message,
    typically used for few-shot examples or chat history).
    """
    class ToolCall(BaseModel):
        """
        Model class for assistant prompt message tool call.
        """
        class ToolCallFunction(BaseModel):
            """
            Model class for assistant prompt message tool call function.
            """
            name: str  # tool name
            arguments: str  # tool arguments

        # Tool ID; effective only for OpenAI tool calls. It is the unique ID
        # of a tool invocation — the same tool may be called multiple times.
        id: str
        type: str  # defaults to "function"
        function: ToolCallFunction  # tool call information

    role: PromptMessageRole = PromptMessageRole.ASSISTANT
    # Tool calls returned by the model (only present when tools were passed
    # in and the model decided a tool needs to be invoked).
    tool_calls: list[ToolCall] = []
class TextEmbeddingResult(BaseModel):
    """
    Model class for text embedding result.
    """
    model: str  # the model actually used
    embeddings: list[list[float]]  # embedding vectors, one per input text
    usage: EmbeddingUsage  # usage information