import copy
from collections import defaultdict
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union

from huggingface_hub.utils import logging, yaml_dump


logger = logging.get_logger(__name__)


@dataclass
class EvalResult:
    """
    Flattened representation of individual evaluation results found in model-index of Model Cards.

    For more information on the model-index spec, see https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1.

    Args:
        task_type (`str`):
            The task identifier. Example: "image-classification".
        dataset_type (`str`):
            The dataset identifier. Example: "common_voice". Use dataset id from https://hf.co/datasets.
        dataset_name (`str`):
            A pretty name for the dataset. Example: "Common Voice (French)".
        metric_type (`str`):
            The metric identifier. Example: "wer". Use metric id from https://hf.co/metrics.
        metric_value (`Any`):
            The metric value. Example: 0.9 or "20.0 ± 1.2".
        task_name (`str`, *optional*):
            A pretty name for the task. Example: "Speech Recognition".
        dataset_config (`str`, *optional*):
            The name of the dataset configuration used in `load_dataset()`.
            Example: fr in `load_dataset("common_voice", "fr")`. See the `datasets` docs for more info:
            https://hf.co/docs/datasets/package_reference/loading_methods#datasets.load_dataset.name
        dataset_split (`str`, *optional*):
            The split used in `load_dataset()`. Example: "test".
        dataset_revision (`str`, *optional*):
            The revision (AKA Git Sha) of the dataset used in `load_dataset()`.
            Example: 5503434ddd753f426f4b38109466949a1217c2bb
        dataset_args (`Dict[str, Any]`, *optional*):
            The arguments passed during `Metric.compute()`. Example for `bleu`: `{"max_order": 4}`
        metric_name (`str`, *optional*):
            A pretty name for the metric. Example: "Test WER".
        metric_config (`str`, *optional*):
            The name of the metric configuration used in `load_metric()`.
            Example: bleurt-large-512 in `load_metric("bleurt", "bleurt-large-512")`.
            See the `datasets` docs for more info: https://huggingface.co/docs/datasets/v2.1.0/en/loading#load-configurations
        metric_args (`Dict[str, Any]`, *optional*):
            The arguments passed during `Metric.compute()`. Example for `bleu`: max_order: 4
        verified (`bool`, *optional*):
            Indicates whether the metrics originate from Hugging Face's [evaluation service](https://huggingface.co/spaces/autoevaluate/model-evaluator) or not. Automatically computed by Hugging Face, do not set.
        verify_token (`str`, *optional*):
            A JSON Web Token that is used to verify whether the metrics originate from Hugging Face's [evaluation service](https://huggingface.co/spaces/autoevaluate/model-evaluator) or not.
        source_name (`str`, *optional*):
            The name of the source of the evaluation result. Example: "Open LLM Leaderboard".
        source_url (`str`, *optional*):
            The URL of the source of the evaluation result. Example: "https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard".
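
    Example:
        A minimal, illustrative construction (the task/dataset/metric identifiers below are
        placeholder values, not real evaluation numbers):

        ```python
        >>> from huggingface_hub import EvalResult
        >>> result = EvalResult(
        ...     task_type="image-classification",
        ...     dataset_type="beans",
        ...     dataset_name="Beans",
        ...     metric_type="accuracy",
        ...     metric_value=0.9,
        ... )
        >>> result.unique_identifier
        ('image-classification', 'beans', None, None, None)
        ```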
    """

    # Required fields

    task_type: str
    dataset_type: str
    dataset_name: str
    metric_type: str
    metric_value: Any

    # Optional fields

    task_name: Optional[str] = None
    dataset_config: Optional[str] = None
    dataset_split: Optional[str] = None
    dataset_revision: Optional[str] = None
    dataset_args: Optional[Dict[str, Any]] = None
    metric_name: Optional[str] = None
    metric_config: Optional[str] = None
    metric_args: Optional[Dict[str, Any]] = None
    verified: Optional[bool] = None
    verify_token: Optional[str] = None
    source_name: Optional[str] = None
    source_url: Optional[str] = None

    @property
    def unique_identifier(self) -> tuple:
        """Returns a tuple that uniquely identifies this evaluation."""
        return (
            self.task_type,
            self.dataset_type,
            self.dataset_config,
            self.dataset_split,
            self.dataset_revision,
        )

    def is_equal_except_value(self, other: "EvalResult") -> bool:
        """
        Return True if `self` and `other` describe exactly the same metric but with a
        different value.
        """
        for key in self.__dict__:
            if key == "metric_value":
                continue
            # `verify_token` is derived from `metric_value`, so it is also excluded from the comparison.
            if key != "verify_token" and getattr(self, key) != getattr(other, key):
                return False
        return True

    def __post_init__(self) -> None:
        if self.source_name is not None and self.source_url is None:
            raise ValueError("If `source_name` is provided, `source_url` must also be provided.")


class CardData:
    """Structure containing metadata from a RepoCard.

    [`CardData`] is the parent class of [`ModelCardData`] and [`DatasetCardData`].

    Metadata can be exported as a dictionary or YAML. Export can be customized to alter the representation of the data
    (example: flatten evaluation results). `CardData` behaves as a dictionary (can get, pop, set values) but does not
    inherit from `dict`, to allow this export step.
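
    Example:
        An illustrative sketch of the dict-like behavior (the keys used here are arbitrary):

        ```python
        >>> from huggingface_hub import CardData
        >>> data = CardData(language="en", license="mit")
        >>> data["language"]
        'en'
        >>> data.get("tags", [])
        []
        >>> len(data)
        2
        ```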
    Fignore_metadata_errorsc                 K   s   | j | d S N)r%   update)r    r7   kwargsr!   r!   r"   __init__   s   zCardData.__init__c                 C   s(   t | j}| | dd | D S )zConverts CardData to a dict.

        Returns:
            `dict`: CardData represented as a dictionary ready to be dumped to a YAML
            block for inclusion in a README.md file.
        c                 S   s   i | ]\}}|d ur||qS r8   r!   ).0r(   valuer!   r!   r"   
<dictcomp>   s    z$CardData.to_dict.<locals>.<dictcomp>)copydeepcopyr%   _to_dictr&   r    	data_dictr!   r!   r"   to_dict   s   
zCardData.to_dictc                 C   s   dS )zUse this method in child classes to alter the dict representation of the data. Alter the dict in-place.

        Args:
            data_dict (`dict`): The raw dict representation of the card data.
        Nr!   rB   r!   r!   r"   rA      s   zCardData._to_dictNoriginal_orderr   c                    sH   |r fdd|t t j t|  D  _t  d|d S )a
  Dumps CardData to a YAML block for inclusion in a README.md file.

        Args:
            line_break (str, *optional*):
                The line break to use when dumping to yaml.

        Returns:
            `str`: CardData represented as a YAML block.
        c                    s"   i | ]}| j v r| j | qS r!   r%   )r<   kr   r!   r"   r>      s
    

z$CardData.to_yaml.<locals>.<dictcomp>F)	sort_keys
line_break)listsetr%   keysr   rD   strip)r    rI   rE   r!   r   r"   to_yaml   s
   

zCardData.to_yamlc                 C   
   t | jS r8   )reprr%   r   r!   r!   r"   __repr__   s   
zCardData.__repr__c                 C   s   |   S r8   )rN   r   r!   r!   r"   __str__   s   zCardData.__str__r(   defaultc                 C   s   | j |}|du r|S |S )#Get value for a given metadata key.N)r%   get)r    r(   rS   r=   r!   r!   r"   rU      s   zCardData.getc                 C   s   | j ||S )z#Pop value for a given metadata key.)r%   pop)r    r(   rS   r!   r!   r"   rV         zCardData.popc                 C   s
   | j | S )rT   rF   r    r(   r!   r!   r"   __getitem__      
zCardData.__getitem__r=   c                 C   s   || j |< dS )z#Set value for a given metadata key.NrF   )r    r(   r=   r!   r!   r"   __setitem__   rW   zCardData.__setitem__c                 C   s
   || j v S )z%Check if a given metadata key is set.rF   rX   r!   r!   r"   __contains__   rZ   zCardData.__contains__c                 C   rO   )z'Return the number of metadata keys set.)lenr%   r   r!   r!   r"   __len__   rZ   zCardData.__len__)F)NNr8   )r-   r.   r/   r0   r3   r;   rD   rA   r   r   r1   rN   rQ   rR   r   rU   rV   rY   r[   r\   intr^   r!   r!   r!   r"   r6      s    	r6   c                       s   e Zd ZdZdddddddddddddddeeeee f  deeeee f  deee  deeeee f  d	ee d
ee dee dee deee  dee dee deee  de	f fddZ
dd Z  ZS )ModelCardDataaQ  Model Card Metadata that is used by Hugging Face Hub when included at the top of your README.md

    Args:
        base_model (`str` or `List[str]`, *optional*):
            The identifier of the base model from which the model derives. This is applicable for example if your model is a
            fine-tune or adapter of an existing model. The value must be the ID of a model on the Hub (or a list of IDs
            if your model derives from multiple models). Defaults to None.
        datasets (`Union[str, List[str]]`, *optional*):
            Dataset or list of datasets that were used to train this model. Should be a dataset ID
            found on https://hf.co/datasets. Defaults to None.
        eval_results (`Union[List[EvalResult], EvalResult]`, *optional*):
            List of `huggingface_hub.EvalResult` that define evaluation results of the model. If provided,
            `model_name` is used as a name on PapersWithCode's leaderboards. Defaults to `None`.
        language (`Union[str, List[str]]`, *optional*):
            Language of model's training data or metadata. It must be an ISO 639-1, 639-2 or
            639-3 code (two/three letters), or a special value like "code", "multilingual". Defaults to `None`.
        library_name (`str`, *optional*):
            Name of library used by this model. Example: keras or any library from
            https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/src/model-libraries.ts.
            Defaults to None.
        license (`str`, *optional*):
            License of this model. Example: apache-2.0 or any license from
            https://huggingface.co/docs/hub/repositories-licenses. Defaults to None.
        license_name (`str`, *optional*):
            Name of the license of this model. Defaults to None. To be used in conjunction with `license_link`.
            Common licenses (Apache-2.0, MIT, CC-BY-SA-4.0) do not need a name. In that case, use `license` instead.
        license_link (`str`, *optional*):
            Link to the license of this model. Defaults to None. To be used in conjunction with `license_name`.
            Common licenses (Apache-2.0, MIT, CC-BY-SA-4.0) do not need a link. In that case, use `license` instead.
        metrics (`List[str]`, *optional*):
            List of metrics used to evaluate this model. Should be a metric name that can be found
            at https://hf.co/metrics. Example: 'accuracy'. Defaults to None.
        model_name (`str`, *optional*):
            A name for this model. It is used along with
            `eval_results` to construct the `model-index` within the card's metadata. The name
            you supply here is what will be used on PapersWithCode's leaderboards. If None is provided
            then the repo name is used as a default. Defaults to None.
        pipeline_tag (`str`, *optional*):
            The pipeline tag associated with the model. Example: "text-classification".
        tags (`List[str]`, *optional*):
            List of tags to add to your model that can be used when filtering on the Hugging
            Face Hub. Defaults to None.
        ignore_metadata_errors (`bool`):
            If True, errors while parsing the metadata section will be ignored. Some information might be lost during
            the process. Use it at your own risk.
        kwargs (`dict`, *optional*):
            Additional metadata that will be added to the model card. Defaults to None.

    Example:
        ```python
        >>> from huggingface_hub import ModelCardData
        >>> card_data = ModelCardData(
        ...     language="en",
        ...     license="mit",
        ...     library_name="timm",
        ...     tags=['image-classification', 'resnet'],
        ... )
        >>> card_data.to_dict()
        {'language': 'en', 'license': 'mit', 'library_name': 'timm', 'tags': ['image-classification', 'resnet']}

        ```
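
        A sketch with illustrative evaluation results (passing `eval_results` requires
        `model_name` to be set; the dataset/metric identifiers below are placeholders):

        ```python
        >>> from huggingface_hub import EvalResult, ModelCardData
        >>> card_data = ModelCardData(
        ...     model_name="my-cool-model",
        ...     eval_results=[
        ...         EvalResult(
        ...             task_type="image-classification",
        ...             dataset_type="beans",
        ...             dataset_name="Beans",
        ...             metric_type="accuracy",
        ...             metric_value=0.9,
        ...         )
        ...     ],
        ... )
        >>> card_data.to_dict()
        {'model-index': [{'name': 'my-cool-model', 'results': [{'task': {'type': 'image-classification'}, 'dataset': {'name': 'Beans', 'type': 'beans'}, 'metrics': [{'type': 'accuracy', 'value': 0.9}]}]}]}
        ```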
    """

    def __init__(
        self,
        base_model: Optional[Union[str, List[str]]] = None,
        datasets: Optional[Union[str, List[str]]] = None,
        eval_results: Optional[List[EvalResult]] = None,
        language: Optional[Union[str, List[str]]] = None,
        library_name: Optional[str] = None,
        license: Optional[str] = None,
        license_name: Optional[str] = None,
        license_link: Optional[str] = None,
        metrics: Optional[List[str]] = None,
        model_name: Optional[str] = None,
        pipeline_tag: Optional[str] = None,
        tags: Optional[List[str]] = None,
        ignore_metadata_errors: bool = False,
        **kwargs,
    ):
        self.base_model = base_model
        self.datasets = datasets
        self.eval_results = eval_results
        self.language = language
        self.library_name = library_name
        self.license = license
        self.license_name = license_name
        self.license_link = license_link
        self.metrics = metrics
        self.model_name = model_name
        self.pipeline_tag = pipeline_tag
        self.tags = _to_unique_list(tags)

        # If a raw `model-index` is present in the metadata, parse it into eval results.
        model_index = kwargs.pop("model-index", None)
        if model_index:
            try:
                model_name, eval_results = model_index_to_eval_results(model_index)
                self.model_name = model_name
                self.eval_results = eval_results
            except (KeyError, TypeError) as error:
                if ignore_metadata_errors:
                    logger.warning("Invalid model-index. Not loading eval results into CardData.")
                else:
                    raise ValueError(
                        f"Invalid `model_index` in metadata cannot be parsed: {error.__class__} {error}. Pass"
                        " `ignore_metadata_errors=True` to ignore this error while loading a Model Card. Warning:"
                        " some information will be lost. Use it at your own risk."
                    )

        super().__init__(**kwargs)

        if self.eval_results:
            if isinstance(self.eval_results, EvalResult):
                self.eval_results = [self.eval_results]
            if self.model_name is None:
                raise ValueError("Passing `eval_results` requires `model_name` to be set.")

    def _to_dict(self, data_dict):
        """Format the internal data dict. In this case, we convert eval results to a valid model index."""
        if self.eval_results is not None:
            data_dict["model-index"] = eval_results_to_model_index(self.model_name, self.eval_results)
            del data_dict["eval_results"], data_dict["model_name"]


class DatasetCardData(CardData):
    """Dataset Card Metadata that is used by Hugging Face Hub when included at the top of your README.md

    Args:
        language (`List[str]`, *optional*):
            Language of dataset's data or metadata. It must be an ISO 639-1, 639-2 or
            639-3 code (two/three letters), or a special value like "code", "multilingual".
        license (`Union[str, List[str]]`, *optional*):
            License(s) of this dataset. Example: apache-2.0 or any license from
            https://huggingface.co/docs/hub/repositories-licenses.
        annotations_creators (`Union[str, List[str]]`, *optional*):
            How the annotations for the dataset were created.
            Options are: 'found', 'crowdsourced', 'expert-generated', 'machine-generated', 'no-annotation', 'other'.
        language_creators (`Union[str, List[str]]`, *optional*):
            How the text-based data in the dataset was created.
            Options are: 'found', 'crowdsourced', 'expert-generated', 'machine-generated', 'other'
        multilinguality (`Union[str, List[str]]`, *optional*):
            Whether the dataset is multilingual.
            Options are: 'monolingual', 'multilingual', 'translation', 'other'.
        size_categories (`Union[str, List[str]]`, *optional*):
            The number of examples in the dataset. Options are: 'n<1K', '1K<n<10K', '10K<n<100K',
            '100K<n<1M', '1M<n<10M', '10M<n<100M', '100M<n<1B', '1B<n<10B', '10B<n<100B', '100B<n<1T', 'n>1T', and 'other'.
        source_datasets (`List[str]`, *optional*):
            Indicates whether the dataset is an original dataset or extended from another existing dataset.
            Options are: 'original' and 'extended'.
        task_categories (`Union[str, List[str]]`, *optional*):
            What categories of task does the dataset support?
        task_ids (`Union[str, List[str]]`, *optional*):
            What specific tasks does the dataset support?
        paperswithcode_id (`str`, *optional*):
            ID of the dataset on PapersWithCode.
        pretty_name (`str`, *optional*):
            A more human-readable name for the dataset. (ex. "Cats vs. Dogs")
        train_eval_index (`Dict`, *optional*):
            A dictionary that describes the necessary spec for doing evaluation on the Hub.
            If not provided, it will be gathered from the 'train-eval-index' key of the kwargs.
        config_names (`Union[str, List[str]]`, *optional*):
            A list of the available dataset configs for the dataset.
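
    Example:
        An illustrative sketch (the field values below are placeholders):

        ```python
        >>> from huggingface_hub import DatasetCardData
        >>> card_data = DatasetCardData(
        ...     language="en",
        ...     license="mit",
        ...     task_categories=["image-classification"],
        ...     pretty_name="Beans",
        ... )
        >>> card_data.to_dict()
        {'language': 'en', 'license': 'mit', 'task_categories': ['image-classification'], 'pretty_name': 'Beans'}
        ```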
    NF)rd   rf   annotations_creatorslanguage_creatorsmultilingualitysize_categoriessource_datasetstask_categoriestask_idspaperswithcode_idpretty_nametrain_eval_indexconfig_namesr7   rd   rf   r~   r   r   r   r   r   r   r   r   r   r   r7   c                   sp   || _ || _|| _|| _|| _|| _|| _|| _|	| _|
| _	|| _
|| _|p+|dd | _t jdi | d S )Ntrain-eval-indexr!   )r~   r   rd   rf   r   r   r   r   r   r   r   r   rV   r   rv   r;   )r    rd   rf   r~   r   r   r   r   r   r   r   r   r   r   r7   r:   rz   r!   r"   r;     s   zDatasetCardData.__init__c                 C   s   | d|d< d S )Nr   r   )rV   rB   r!   r!   r"   rA     s   zDatasetCardData._to_dict)r-   r.   r/   r0   r   r	   r1   r   r   r3   r;   rA   r|   r!   r!   rz   r"   r}   v  s\    *
	
$r}   c                       s   e Zd ZdZddddddddddddddee dee dee dee d	ee d
ee dee dee deee  deee  deee  def fddZ	  Z
S )SpaceCardDataa	  Space Card Metadata that is used by Hugging Face Hub when included at the top of your README.md

    To get an exhaustive reference of Spaces configuration, please visit https://huggingface.co/docs/hub/spaces-config-reference#spaces-configuration-reference.

    Args:
        title (`str`, *optional*):
            Title of the Space.
        sdk (`str`, *optional*):
            SDK of the Space (one of `gradio`, `streamlit`, `docker`, or `static`).
        sdk_version (`str`, *optional*):
            Version of the used SDK (if Gradio/Streamlit sdk).
        python_version (`str`, *optional*):
            Python version used in the Space (if Gradio/Streamlit sdk).
        app_file (`str`, *optional*):
            Path to your main application file (which contains either gradio or streamlit Python code, or static html code).
            Path is relative to the root of the repository.
        app_port (`int`, *optional*):
            Port on which your application is running. Used only if sdk is `docker`.
        license (`str`, *optional*):
            License of this Space. Example: apache-2.0 or any license from
            https://huggingface.co/docs/hub/repositories-licenses.
        duplicated_from (`str`, *optional*):
            ID of the original Space if this is a duplicated Space.
        models (`List[str]`, *optional*):
            List of models related to this Space. Should be a model ID found on https://hf.co/models.
        datasets (`List[str]`, *optional*):
            List of datasets related to this Space. Should be a dataset ID found on https://hf.co/datasets.
        tags (`List[str]`, *optional*):
            List of tags to add to your Space that can be used when filtering on the Hub.
        ignore_metadata_errors (`bool`):
            If True, errors while parsing the metadata section will be ignored. Some information might be lost during
            the process. Use it at your own risk.
        kwargs (`dict`, *optional*):
            Additional metadata that will be added to the space card.

    Example:
        ```python
        >>> from huggingface_hub import SpaceCardData
        >>> card_data = SpaceCardData(
        ...     title="Dreambooth Training",
        ...     license="mit",
        ...     sdk="gradio",
        ...     duplicated_from="multimodalart/dreambooth-training"
        ... )
        >>> card_data.to_dict()
        {'title': 'Dreambooth Training', 'sdk': 'gradio', 'license': 'mit', 'duplicated_from': 'multimodalart/dreambooth-training'}
        ```
    NF)titlesdksdk_versionpython_versionapp_fileapp_portrf   duplicated_frommodelsrb   rl   r7   r   r   r   r   r   r   rf   r   r   rb   rl   r7   c                   s\   || _ || _|| _|| _|| _|| _|| _|| _|	| _|
| _	t
|| _t jdi | d S )Nr!   )r   r   r   r   r   r   rf   r   r   rb   ro   rl   rv   r;   )r    r   r   r   r   r   r   rf   r   r   rb   rl   r7   r:   rz   r!   r"   r;     s   
zSpaceCardData.__init__)r-   r.   r/   r0   r   r1   r_   r   r3   r;   r|   r!   r!   rz   r"   r     sN    4	



r   rx   r   c                 C   s  g }| D ]}|d }|d }|D ]}|d d }|d  d}|d d }|d d }	|d  d}
|d  d}|d  d}|d  d	}| d
i  d}| d
i  d}|d D ]`}|d }|d }| d}| d	}| d}| d}| d}td!i d|d|d|	d|d|d|d|
d|d|d|d|d|d|d|d|d|d|}|| q]qq||fS )"a  Takes in a model index and returns the model name and a list of `huggingface_hub.EvalResult` objects.

    A detailed spec of the model index can be found here:
    https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1

    Args:
        model_index (`List[Dict[str, Any]]`):
            A model index data structure, likely coming from a README.md file on the
            Hugging Face Hub.

    Returns:
        model_name (`str`):
            The name of the model as found in the model index. This is used as the
            identifier for the model on leaderboards like PapersWithCode.
        eval_results (`List[EvalResult]`):
            A list of `huggingface_hub.EvalResult` objects containing the metrics
            reported in the provided model_index.

    Example:
        ```python
        >>> from huggingface_hub.repocard_data import model_index_to_eval_results
        >>> # Define a minimal model index
        >>> model_index = [
        ...     {
        ...         "name": "my-cool-model",
        ...         "results": [
        ...             {
        ...                 "task": {
        ...                     "type": "image-classification"
        ...                 },
        ...                 "dataset": {
        ...                     "type": "beans",
        ...                     "name": "Beans"
        ...                 },
        ...                 "metrics": [
        ...                     {
        ...                         "type": "accuracy",
        ...                         "value": 0.9
        ...                     }
        ...                 ]
        ...             }
        ...         ]
        ...     }
        ... ]
        >>> model_name, eval_results = model_index_to_eval_results(model_index)
        >>> model_name
        'my-cool-model'
        >>> eval_results[0].task_type
        'image-classification'
        >>> eval_results[0].metric_type
        'accuracy'

        ```
    nameresultstasktypedatasetconfigsplitrevisionargssourceurlri   r=   r   verifyTokenr   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   Nr!   )rU   r   append)rx   rc   elemr   r   resultr   r   r   r   r   r   r   r   r   r   metricr   r   r   r   r   r   r   eval_resultr!   r!   r"   rp     s|   8




	
)rp   c                 C   sN   t | tttfrt| dd | D S t | tr%t| dd |  D S | S )zk
    Recursively remove `None` values from a dict. Borrowed from: https://stackoverflow.com/a/20558778
    c                 s   s     | ]}|d urt |V  qd S r8   _remove_none)r<   xr!   r!   r"   	<genexpr>  s    z_remove_none.<locals>.<genexpr>c                 s   s4    | ]\}}|d ur|d urt |t |fV  qd S r8   r   )r<   rG   vr!   r!   r"   r     s   2 )rw   rJ   r5   rK   r   dictr&   )objr!   r!   r"   r     s
   
r   rj   rc   c           
   	   C   s   t t}|D ]
}||j | qg }| D ]@}|d }|j|jd|j|j|j	|j
|j|jddd |D d}|jdurRd|ji}|jdurN|j|d	< ||d
< || q| |dg}	t|	S )a  Takes in given model name and list of `huggingface_hub.EvalResult` and returns a
    valid model-index that will be compatible with the format expected by the
    Hugging Face Hub.

    Args:
        model_name (`str`):
            Name of the model (ex. "my-cool-model"). This is used as the identifier
            for the model on leaderboards like PapersWithCode.
        eval_results (`List[EvalResult]`):
            List of `huggingface_hub.EvalResult` objects containing the metrics to be
            reported in the model-index.

    Returns:
        model_index (`List[Dict[str, Any]]`): The eval_results converted to a model-index.

    Example:
        ```python
        >>> from huggingface_hub.repocard_data import eval_results_to_model_index, EvalResult
        >>> # Define minimal eval_results
        >>> eval_results = [
        ...     EvalResult(
        ...         task_type="image-classification",  # Required
        ...         dataset_type="beans",  # Required
        ...         dataset_name="Beans",  # Required
        ...         metric_type="accuracy",  # Required
        ...         metric_value=0.9,  # Required
        ...     )
        ... ]
        >>> eval_results_to_model_index("my-cool-model", eval_results)
        [{'name': 'my-cool-model', 'results': [{'task': {'type': 'image-classification'}, 'dataset': {'name': 'Beans', 'type': 'beans'}, 'metrics': [{'type': 'accuracy', 'value': 0.9}]}]}]

        ```
    r   )r   r   )r   r   r   r   r   r   c              
   S   s.   g | ]}|j |j|j|j|j|j|jd qS ))r   r=   r   r   r   r   r   )r   r   r   r   r   r   r   )r<   r   r!   r!   r"   
<listcomp>  s    
z/eval_results_to_model_index.<locals>.<listcomp>)r   r   ri   Nr   r   r   )r   r   )r   rJ   r#   r   valuesr   r   r   r   r   r   r   r   r   r   r   )
rj   rc   task_and_ds_types_mapr   model_index_datar   sample_resultdatar   rx   r!   r!   r"   r{     s@   %



r{   rl   c                 C   s0   | d u r| S g }| D ]}||vr| | q
|S r8   )r   )rl   unique_tagstagr!   r!   r"   ro     s   
ro   )r?   collectionsr   dataclassesr   typingr   r   r   r   r   r	   huggingface_hub.utilsr
   r   
get_loggerr-   rs   r   r6   r`   r}   r   r1   rp   r   r{   ro   r!   r!   r!   r"   <module>   s$     
 S~P*Qh&&\