```python
def validate_provider_credentials(self, credentials: dict) -> None:
    """
    Validate provider credentials

    You can reuse the validate_credentials method of any model type,
    or implement the validation yourself, e.g. by calling the provider's
    "get model list" API. Raise an exception if validation fails.

    :param credentials: provider credentials, whose form is defined in `provider_credential_schema`.
    """
```
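For instance, a provider that exposes a model-listing endpoint can be validated with a single authenticated request. The following is a minimal sketch: the endpoint URL and the `api_key` field are hypothetical, and the plain `ValueError` stands in for whatever credentials-validation error your runtime expects you to raise.

```python
import requests

def validate_provider_credentials(self, credentials: dict) -> None:
    # `api_key` is only an example field; use whatever keys your
    # provider_credential_schema actually defines.
    api_key = credentials.get("api_key")
    if not api_key:
        raise ValueError("api_key is required")

    # Probe a cheap, read-only endpoint (hypothetical URL); any non-2xx
    # response is treated as invalid credentials.
    response = requests.get(
        "https://api.example-provider.com/v1/models",
        headers={"Authorization": f"Bearer {api_key}"},
        timeout=10,
    )
    if response.status_code != 200:
        raise ValueError(f"Credential validation failed: HTTP {response.status_code}")
```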
```python
@property
def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
    """
    Map model invoke errors to unified errors

    The key is the error type exposed to the caller.
    The value is a list of error types raised by the model, which need to be
    converted into the unified error type for the caller.

    :return: invoke error mapping
    """
```
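As an illustration, a provider built on `requests` might group connection problems, server-side failures, and malformed requests like this. The `InvokeError` subclasses on the left are assumed to be importable from the runtime's error module (import omitted here), and the exception lists on the right are placeholders for whatever the underlying SDK actually raises.

```python
import requests

@property
def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
    # Left: unified error types exposed to the caller (assumed InvokeError subclasses).
    # Right: provider/SDK exceptions that should be converted into them.
    return {
        InvokeConnectionError: [
            requests.exceptions.ConnectionError,
            requests.exceptions.Timeout,
        ],
        InvokeServerUnavailableError: [requests.exceptions.HTTPError],
        InvokeBadRequestError: [ValueError, KeyError],
    }
```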
```python
def get_num_tokens(self, model: str, credentials: dict, prompt_messages: list[PromptMessage],
                   tools: Optional[list[PromptMessageTool]] = None) -> int:
    """
    Get number of tokens for given prompt messages

    :param model: model name
    :param credentials: model credentials
    :param prompt_messages: prompt messages
    :param tools: tools for tool calling
    :return: number of tokens
    """
```
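If the provider does not ship a tokenizer, one workable fallback (see the tokenizer note in the text-embedding section below) is to flatten the prompt messages into plain text and count tokens with the GPT-2 based helper on the `AIModel` base class. This is only a rough estimate and ignores tool definitions; prefer a provider-specific tokenizer when one exists.

```python
def get_num_tokens(self, model: str, credentials: dict, prompt_messages: list[PromptMessage],
                   tools: Optional[list[PromptMessageTool]] = None) -> int:
    # Rough estimate: join all message contents and count with the GPT-2
    # fallback helper from the AIModel base class; tools are ignored here.
    text = "\n".join(str(message.content) for message in prompt_messages)
    return self._get_num_tokens_by_gpt2(text)
```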
```python
def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
    """
    Get customizable model schema

    :param model: model name
    :param credentials: model credentials
    :return: model schema
    """
```
```python
def _invoke(self, model: str, credentials: dict, texts: list[str],
            user: Optional[str] = None) -> TextEmbeddingResult:
    """
    Invoke text embedding model

    :param model: model name
    :param credentials: model credentials
    :param texts: texts to embed
    :param user: unique user id
    :return: embeddings result
    """
```
```python
def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int:
    """
    Get number of tokens for given texts

    :param model: model name
    :param credentials: model credentials
    :param texts: texts to embed
    :return: number of tokens
    """
```
For parameter descriptions, see the Embedding invocation above.
As with `LargeLanguageModel` above, this interface needs to choose an appropriate tokenizer for the given model. If the model does not provide a tokenizer, you can use the `_get_num_tokens_by_gpt2(text: str)` method of the `AIModel` base class for the calculation.
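For example, an embedding implementation without its own tokenizer could simply sum the GPT-2 based estimate over all input texts. A rough sketch:

```python
def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int:
    # Sum the estimated token count of every text to be embedded.
    return sum(self._get_num_tokens_by_gpt2(text) for text in texts)
```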
```python
def _invoke(self, model: str, credentials: dict, file: IO[bytes],
            user: Optional[str] = None) -> str:
    """
    Invoke speech-to-text model

    :param model: model name
    :param credentials: model credentials
    :param file: audio file
    :param user: unique user id
    :return: text for given audio file
    """
```
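A provider that exposes a multipart transcription endpoint could be wrapped roughly like this. The endpoint URL, the response shape (`{"text": ...}`), and the `api_key` field are assumptions, not any particular provider's API.

```python
import requests
from typing import IO, Optional

def _invoke(self, model: str, credentials: dict, file: IO[bytes],
            user: Optional[str] = None) -> str:
    # Upload the audio file to a hypothetical transcription endpoint.
    response = requests.post(
        "https://api.example-provider.com/v1/audio/transcriptions",
        headers={"Authorization": f"Bearer {credentials['api_key']}"},
        files={"file": file},
        data={"model": model},
        timeout=60,
    )
    response.raise_for_status()
    # The endpoint is assumed to return a JSON body like {"text": "..."}.
    return response.json()["text"]
```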
```python
def _invoke(self, model: str, credentials: dict, content_text: str, streaming: bool,
            user: Optional[str] = None):
    """
    Invoke text-to-speech model

    :param model: model name
    :param credentials: model credentials
    :param content_text: text content to be converted to speech
    :param streaming: whether the output is streamed
    :param user: unique user id
    :return: audio file for the given text
    """
```
```python
def _invoke(self, model: str, credentials: dict, text: str,
            user: Optional[str] = None) -> bool:
    """
    Invoke moderation model

    :param model: model name
    :param credentials: model credentials
    :param text: text to moderate
    :param user: unique user id
    :return: false if the text is safe, true otherwise
    """
```
Parameters:

- `model` (string) Model name
- `credentials` (object) Credential information

  The parameters of the credential information are defined by `provider_credential_schema` or `model_credential_schema` in the provider's YAML configuration file, and include values such as `api_key`.

- `text` (string) Text content
- `user` (string) [optional] Unique identifier of the user

  This can help the provider monitor and detect abuse.
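As a sketch, a moderation implementation backed by a hypothetical `/v1/moderations` endpoint that returns `{"flagged": <bool>}` could look like this (the endpoint, the response shape, and the `api_key` field are all assumptions):

```python
import requests
from typing import Optional

def _invoke(self, model: str, credentials: dict, text: str,
            user: Optional[str] = None) -> bool:
    response = requests.post(
        "https://api.example-provider.com/v1/moderations",   # hypothetical endpoint
        headers={"Authorization": f"Bearer {credentials['api_key']}"},
        json={"model": model, "input": text, "user": user},
        timeout=10,
    )
    response.raise_for_status()
    # Contract from above: return False when the text is safe, True otherwise.
    return bool(response.json().get("flagged", False))
```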
```python
class TextPromptMessageContent(PromptMessageContent):
    """
    Model class for text prompt message content.
    """
    type: PromptMessageContentType = PromptMessageContentType.TEXT
```
```python
class ImagePromptMessageContent(PromptMessageContent):
    """
    Model class for image prompt message content.
    """
    class DETAIL(Enum):
        LOW = 'low'
        HIGH = 'high'

    type: PromptMessageContentType = PromptMessageContentType.IMAGE
    detail: DETAIL = DETAIL.LOW  # resolution
```
When passing in a combination of text and images, the image needs to be constructed as this entity and included as part of the `content` list. `data` can be a URL or the base64-encoded string of the image.
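For instance, a user message that combines a question with an image might be assembled as follows. `UserPromptMessage` is the assumed user-role message class, the URL is a placeholder, and `data` on the text part is assumed to carry the text itself.

```python
image = ImagePromptMessageContent(
    data="https://example.com/cat.png",              # or a base64-encoded image string
    detail=ImagePromptMessageContent.DETAIL.HIGH,
)
text = TextPromptMessageContent(data="What is shown in this picture?")

# The content of the user message is a list mixing text and image parts.
message = UserPromptMessage(content=[text, image])
```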
```python
class TextEmbeddingResult(BaseModel):
    """
    Model class for text embedding result.
    """
    model: str                     # model actually used
    embeddings: list[list[float]]  # list of embedding vectors, one per entry in the input texts list
    usage: EmbeddingUsage          # usage information
```
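A small usage sketch of the result structure: each entry in `embeddings` lines up with the corresponding entry in the `texts` argument. The `embedding_model` instance and the credential values below are placeholders.

```python
texts = ["hello", "world"]
result: TextEmbeddingResult = embedding_model._invoke(
    model="example-embedding-model",
    credentials={"api_key": "..."},
    texts=texts,
)

assert len(result.embeddings) == len(texts)
first_vector: list[float] = result.embeddings[0]  # vector for "hello"
```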