o
    h                      @   s   d dl Z d dlmZmZmZmZmZ ddlmZ ddl	m
Z
 ddlmZmZmZmZmZ ddlmZ eeZg dZG d	d
 d
ZdS )    N)AnyDictListOptionalUnion   )	constants)HfApi)build_hf_headersget_sessionis_pillow_availableloggingvalidate_hf_hub_args)_deprecate_method)ztext-classificationztoken-classificationztable-question-answeringzquestion-answeringzzero-shot-classificationtranslationsummarizationconversationalzfeature-extractionztext-generationztext2text-generationz	fill-maskzsentence-similarityztext-to-speechzautomatic-speech-recognitionzaudio-to-audiozaudio-classificationzvoice-activity-detectionzimage-classificationzobject-detectionzimage-segmentationztext-to-imagezimage-to-imageztabular-classificationztabular-regressionc                   @   s   e Zd ZdZeeddd			ddedee d	ee d
efddZ	dd Z
				ddeeeeee eee  f  dee dee dedef
ddZdS )InferenceApia  Client to configure requests and make calls to the HuggingFace Inference API.

    Example:

    ```python
    >>> from huggingface_hub.inference_api import InferenceApi

    >>> # Mask-fill example
    >>> inference = InferenceApi("bert-base-uncased")
    >>> inference(inputs="The goal of life is [MASK].")
    [{'sequence': 'the goal of life is life.', 'score': 0.10933292657136917, 'token': 2166, 'token_str': 'life'}]

    >>> # Question Answering example
    >>> inference = InferenceApi("deepset/roberta-base-squad2")
    >>> inputs = {
    ...     "question": "What's my name?",
    ...     "context": "My name is Clara and I live in Berkeley.",
    ... }
    >>> inference(inputs)
    {'score': 0.9326569437980652, 'start': 11, 'end': 16, 'answer': 'Clara'}

    >>> # Zero-shot example
    >>> inference = InferenceApi("typeform/distilbert-base-uncased-mnli")
    >>> inputs = "Hi, I recently bought a device from your company but it is not working as advertised and I would like to get reimbursed!"
    >>> params = {"candidate_labels": ["refund", "legal", "faq"]}
    >>> inference(inputs, params)
    {'sequence': 'Hi, I recently bought a device from your company but it is not working as advertised and I would like to get reimbursed!', 'labels': ['refund', 'faq', 'legal'], 'scores': [0.9378499388694763, 0.04914155602455139, 0.013008488342165947]}

    >>> # Overriding configured task
    >>> inference = InferenceApi("bert-base-uncased", task="feature-extraction")

    >>> # Text-to-image
    >>> inference = InferenceApi("stabilityai/stable-diffusion-2-1")
    >>> inference("cat")
    <PIL.PngImagePlugin.PngImageFile image (...)>

    >>> # Return as raw response to parse the output yourself
    >>> inference = InferenceApi("mio/amadeus")
    >>> response = inference("hello world", raw_response=True)
    >>> response.headers
    {"Content-Type": "audio/flac", ...}
    >>> response.content # raw bytes from server
    b'(...)'
    ```
    z1.0z`InferenceApi` client is deprecated in favor of the more feature-complete `InferenceClient`. Check out this guide to learn how to convert your script to use it: https://huggingface.co/docs/huggingface_hub/guides/inference#legacy-inferenceapi-client.)versionmessageNFrepo_idtasktokengpuc                 C   s   d|d| _ t|d| _t|dj|d}|js|std|r:||jkr:|tvr1td| dt	d || _
n|jd	usCJ d
|j| _
tj d| j
 d| | _d	S )ak  Inits headers and API call information.

        Args:
            repo_id (``str``):
                Id of repository (e.g. `user/bert-base-uncased`).
            task (``str``, `optional`, defaults ``None``):
                Whether to force a task instead of using task specified in the
                repository.
            token (`str`, `optional`):
                The API token to use as HTTP bearer authorization. This is not
                the authentication token. You can find the token in
                https://huggingface.co/settings/token. Alternatively, you can
                find both your organizations and personal API tokens using
                `HfApi().whoami(token)`.
            gpu (`bool`, `optional`, defaults `False`):
                Whether to use GPU instead of CPU for inference(requires Startup
                plan at least).
        T)wait_for_modeluse_gpu)r   )r   zTask not specified in the repository. Please add it to the model card using pipeline_tag (https://huggingface.co/docs#how-is-a-models-type-of-inference-api-and-widget-determined)zInvalid task z. Make sure it's valid.zlYou're using a different task than the one specified in the repository. Be sure to know what you're doing :)NzPipeline tag cannot be Nonez
/pipeline//)optionsr
   headersr	   
model_infopipeline_tag
ValueError	ALL_TASKSloggerwarningr   r   INFERENCE_ENDPOINTapi_url)selfr   r   r   r   r    r(   q/var/www/html/construction_image-detection-poc/venv/lib/python3.10/site-packages/huggingface_hub/inference_api.py__init__]   s"   "
zInferenceApi.__init__c                 C   s   d| j  d| j d| j dS )NzInferenceAPI(api_url='z	', task='z', options=))r&   r   r   )r'   r(   r(   r)   __repr__   s   zInferenceApi.__repr__inputsparamsdataraw_responsereturnc           	      C   s   d| j i}|r||d< |r||d< t j| j| j||d}|r!|S |jdp(d}|drIt s:td| j	 d	d
dl
m} |t|jS |dkrQ| S t| d)a
  Make a call to the Inference API.

        Args:
            inputs (`str` or `Dict` or `List[str]` or `List[List[str]]`, *optional*):
                Inputs for the prediction.
            params (`Dict`, *optional*):
                Additional parameters for the models. Will be sent as `parameters` in the
                payload.
            data (`bytes`, *optional*):
                Bytes content of the request. In this case, leave `inputs` and `params` empty.
            raw_response (`bool`, defaults to `False`):
                If `True`, the raw `Response` object is returned. You can parse its content
                as preferred. By default, the content is parsed into a more practical format
                (json dictionary or PIL Image for example).
        r   r-   
parameters)r   jsonr/   zContent-Type imagezTask 'z' returned as image but Pillow is not installed. Please install it (`pip install Pillow`) or pass `raw_response=True` to get the raw `Response` object and parse the image by yourself.r   )Imagezapplication/jsonz output type is not implemented yet. You can pass `raw_response=True` to get the raw `Response` object and parse the output by yourself.)r   r   postr&   r   get
startswithr   ImportErrorr   PILr6   openioBytesIOcontentr3   NotImplementedError)	r'   r-   r.   r/   r0   payloadresponsecontent_typer6   r(   r(   r)   __call__   s,   
zInferenceApi.__call__)NNF)NNNF)__name__
__module____qualname____doc__r   r   strr   boolr*   r,   r   r   r   bytesr   rD   r(   r(   r(   r)   r   .   sF    .4r   )r=   typingr   r   r   r   r   r4   r   hf_apir	   utilsr
   r   r   r   r   utils._deprecationr   
get_loggerrE   r#   r"   r   r(   r(   r(   r)   <module>   s    
!