from typing import Any, Dict, List, Union

import numpy as np

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import (
        MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES,
        MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES,
        MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
        MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES,
    )

logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageSegmentationPipeline(Pipeline):
    """
    Image segmentation pipeline using any `AutoModelForXXXSegmentation`. This pipeline predicts masks of objects and
    their classes.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> segmenter = pipeline(model="facebook/detr-resnet-50-panoptic")
    >>> segments = segmenter("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
    >>> len(segments)
    2

    >>> segments[0]["label"]
    'bird'

    >>> segments[1]["label"]
    'bird'

    >>> type(segments[0]["mask"])  # This is a black and white mask showing where the bird is on the original image.
    <class 'PIL.Image.Image'>

    >>> segments[0]["mask"].size
    (768, 512)
    ```
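
    The call-time parameters documented in [`~ImageSegmentationPipeline.__call__`] can be tuned per call. A
    minimal sketch, assuming the same panoptic checkpoint as above (the exact segments returned depend on
    the checkpoint and thresholds):

    ```python
    >>> segments = segmenter(
    ...     "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
    ...     subtask="panoptic",
    ...     threshold=0.95,
    ... )  # doctest: +SKIP
    ```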


    This image segmentation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"image-segmentation"`.

    See the list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=image-segmentation).
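
    Each returned prediction carries its mask as a `PIL.Image`, so the output can be consumed directly. A
    minimal sketch of saving every mask to disk (the file names are illustrative):

    ```python
    >>> for i, pred in enumerate(segments):  # doctest: +SKIP
    ...     pred["mask"].save(f"{i}_{pred['label']}.png")
    ```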
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        mapping = MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES.copy()
        mapping.update(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES)
        mapping.update(MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES)
        mapping.update(MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES)
        self.check_model_type(mapping)

    def _sanitize_parameters(self, **kwargs):
        # Route the user-facing kwargs to the stage that consumes them: `subtask` and
        # `timeout` are needed at preprocessing time, the thresholds only at
        # postprocessing time. The middle dict (forward kwargs) stays empty.
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        if "subtask" in kwargs:
            postprocess_kwargs["subtask"] = kwargs["subtask"]
            preprocess_kwargs["subtask"] = kwargs["subtask"]
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        if "mask_threshold" in kwargs:
            postprocess_kwargs["mask_threshold"] = kwargs["mask_threshold"]
        if "overlap_mask_area_threshold" in kwargs:
            postprocess_kwargs["overlap_mask_area_threshold"] = kwargs["overlap_mask_area_threshold"]
        if "timeout" in kwargs:
            preprocess_kwargs["timeout"] = kwargs["timeout"]

        return preprocess_kwargs, {}, postprocess_kwargs

    def __call__(self, images, **kwargs) -> Union[Predictions, List[Prediction]]:
        """
        Perform segmentation (detect masks & classes) in the image(s) passed as inputs.

        Args:
            images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
                The pipeline handles three types of images:

                - A string containing an HTTP(S) link pointing to an image
                - A string containing a local path to an image
                - An image loaded in PIL directly

                The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the
                same format: all as HTTP(S) links, all as local paths, or all as PIL images.
            subtask (`str`, *optional*):
                Segmentation task to be performed, one of `"semantic"`, `"instance"` or `"panoptic"` depending on model
                capabilities. If not set, the pipeline will attempt to resolve in the following order:
                `panoptic`, `instance`, `semantic`.
            threshold (`float`, *optional*, defaults to 0.9):
                Probability threshold to filter out predicted masks.
            mask_threshold (`float`, *optional*, defaults to 0.5):
                Threshold to use when turning the predicted masks into binary values.
            overlap_mask_area_threshold (`float`, *optional*, defaults to 0.5):
                Mask overlap threshold to eliminate small, disconnected segments.
            timeout (`float`, *optional*, defaults to None):
                The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
                the call may block forever.

        Return:
            A dictionary or a list of dictionaries containing the result. If the input is a single image, returns a
            list of dictionaries; if the input is a list of several images, returns a list of lists of dictionaries
            corresponding to each image.

            The dictionaries contain the mask, label and score (where applicable) of each detected object and have
            the following keys:

            - **label** (`str`) -- The class label identified by the model.
            - **mask** (`PIL.Image`) -- A binary mask of the detected object as a PIL Image of shape (width, height) of
              the original image. Returns a mask filled with zeros if no object is found.
            - **score** (*optional* `float`) -- The confidence score of the detected object, present when the model is
              capable of estimating a confidence for the "object" described by the label and the mask.
        """
        return super().__call__(images, **kwargs)

    def preprocess(self, image, subtask=None, timeout=None):
        image = load_image(image, timeout=timeout)
        target_size = [(image.height, image.width)]
        if self.model.config.__class__.__name__ == "OneFormerConfig":
            # OneFormer is task-conditioned: the requested subtask is handed to the image
            # processor and additionally tokenized into `task_inputs` token ids.
            if subtask is None:
                kwargs = {}
            else:
                kwargs = {"task_inputs": [subtask]}
            inputs = self.image_processor(images=[image], return_tensors="pt", **kwargs)
            inputs["task_inputs"] = self.tokenizer(
                inputs["task_inputs"],
                padding="max_length",
                max_length=self.model.config.task_seq_len,
                return_tensors=self.framework,
            )["input_ids"]
        else:
            inputs = self.image_processor(images=[image], return_tensors="pt")
        # Keep the original (height, width) around so postprocessing can resize the
        # predicted masks back to the input image size.
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        # `target_size` is pipeline bookkeeping, not a model input: pop it before the
        # forward pass and re-attach it to the outputs for postprocessing.
        target_size = model_inputs.pop("target_size")
        model_outputs = self.model(**model_inputs)
        model_outputs["target_size"] = target_size
        return model_outputs
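
    # A hypothetical end-to-end trace for a single image (keys and tensor shapes depend
    # on the checkpoint; `H`/`W` stand for the original image height and width):
    #
    #   inputs = self.preprocess(image)      # {"pixel_values": ..., "target_size": [(H, W)]}
    #   outputs = self._forward(inputs)      # raw model outputs with "target_size" carried through
    #   preds = self.postprocess(outputs)    # [{"score": ..., "label": ..., "mask": <PIL.Image>}, ...]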

    def postprocess(
        self, model_outputs, subtask=None, threshold=0.9, mask_threshold=0.5, overlap_mask_area_threshold=0.5
    ):
        # Resolve the postprocessing function in priority order (panoptic, then instance)
        # based on what the image processor supports for the requested subtask.
        fn = None
        if subtask in {"panoptic", None} and hasattr(self.image_processor, "post_process_panoptic_segmentation"):
            fn = self.image_processor.post_process_panoptic_segmentation
        elif subtask in {"instance", None} and hasattr(self.image_processor, "post_process_instance_segmentation"):
            fn = self.image_processor.post_process_instance_segmentation

        if fn is not None:
            outputs = fn(
                model_outputs,
                threshold=threshold,
                mask_threshold=mask_threshold,
                overlap_mask_area_threshold=overlap_mask_area_threshold,
                target_sizes=model_outputs["target_size"],
            )[0]

            annotation = []
            segmentation = outputs["segmentation"]

            # One binary mask per detected segment, rendered as a grayscale PIL image.
            for segment in outputs["segments_info"]:
                mask = (segmentation == segment["id"]) * 255
                mask = Image.fromarray(mask.numpy().astype(np.uint8), mode="L")
                label = self.model.config.id2label[segment["label_id"]]
                score = segment["score"]
                annotation.append({"score": score, "label": label, "mask": mask})
        elif subtask in {"semantic", None} and hasattr(self.image_processor, "post_process_semantic_segmentation"):
            outputs = self.image_processor.post_process_semantic_segmentation(
                model_outputs, target_sizes=model_outputs["target_size"]
            )[0]

            annotation = []
            segmentation = outputs.numpy()
            labels = np.unique(segmentation)

            # Semantic segmentation yields one mask per class present in the image and
            # no per-object score.
            for label in labels:
                mask = (segmentation == label) * 255
                mask = Image.fromarray(mask.astype(np.uint8), mode="L")
                label = self.model.config.id2label[label]
                annotation.append({"score": None, "label": label, "mask": mask})
        else:
            raise ValueError(f"Subtask {subtask} is not supported for model {type(self.model)}")
        return annotation