import numpy as np

from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import (check_variable_and_dtype, check_type,
                                 check_dtype)
from ..fluid import core, layers
from ..fluid.layers import nn, utils
from ..nn import Layer, Conv2D, Sequential, ReLU, BatchNorm2D
from ..nn.initializer import Normal
from ..fluid.framework import (Variable, _non_static_mode, in_dygraph_mode,
                               _in_legacy_dygraph)
from paddle.common_ops_import import *
from paddle import _C_ops, _legacy_C_ops

__all__ = [
    'yolo_loss',
    'yolo_box',
    'prior_box',
    'box_coder',
    'deform_conv2d',
    'DeformConv2D',
    'distribute_fpn_proposals',
    'generate_proposals',
    'read_file',
    'decode_jpeg',
    'roi_pool',
    'RoIPool',
    'psroi_pool',
    'PSRoIPool',
    'roi_align',
    'RoIAlign',
    'nms',
    'matrix_nms',
]


def yolo_loss(x,
              gt_box,
              gt_label,
              anchors,
              anchor_mask,
              class_num,
              ignore_thresh,
              downsample_ratio,
              gt_score=None,
              use_label_smooth=True,
              name=None,
              scale_x_y=1.0):
    r"""

    This operator generates YOLOv3 loss based on given prediction results and
    ground truth boxes.

    The output of previous network is in shape [N, C, H, W], while H and W
    should be the same, H and W specify the grid size, and each grid point predicts
    a given number of bounding boxes. This number, denoted below as S, is specified
    by the number of anchor clusters in each scale. In the second (channel)
    dimension, C should be equal to S * (class_num + 5), where class_num is the object
    category number of source dataset(such as 80 in coco dataset), so in the
    second(channel) dimension, apart from 4 box location coordinates x, y, w, h,
    also includes confidence score of the box and class one-hot key of each anchor box.

    Assume the 4 location coordinates are :math:`t_x, t_y, t_w, t_h`, the box predictions
    should be as follows:

    $$
    b_x = \\sigma(t_x) + c_x
    $$
    $$
    b_y = \\sigma(t_y) + c_y
    $$
    $$
    b_w = p_w e^{t_w}
    $$
    $$
    b_h = p_h e^{t_h}
    $$

    In the equations above, :math:`c_x, c_y` is the top-left corner of the current
    grid cell and :math:`p_w, p_h` are specified by the anchors.
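
    As an illustration, a minimal NumPy sketch of this decoding for a single
    grid cell and anchor (a hypothetical helper, not part of the Paddle API):

    .. code-block:: python

        import numpy as np

        def decode_box(t, grid_xy, anchor_wh):
            # t = [t_x, t_y, t_w, t_h], the raw network outputs
            sigmoid = lambda v: 1.0 / (1.0 + np.exp(-v))
            b_x = sigmoid(t[0]) + grid_xy[0]   # b_x = sigma(t_x) + c_x
            b_y = sigmoid(t[1]) + grid_xy[1]   # b_y = sigma(t_y) + c_y
            b_w = anchor_wh[0] * np.exp(t[2])  # b_w = p_w * e^{t_w}
            b_h = anchor_wh[1] * np.exp(t[3])  # b_h = p_h * e^{t_h}
            return np.array([b_x, b_y, b_w, b_h])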

    As for confidence score, it is the logistic regression value of IoU between
    anchor boxes and ground truth boxes, the score of the anchor box which has
    the max IoU should be 1, and if the anchor box has IoU bigger than ignore
    thresh, the confidence score loss of this anchor box will be ignored.

    Therefore, the YOLOv3 loss consists of three major parts: box location loss,
    objectness loss and classification loss. The L1 loss is used for
    box coordinates (w, h), sigmoid cross entropy loss is used for box
    coordinates (x, y), objectness loss and classification loss.

    Each ground truth box finds its best-matching anchor box among all anchors.
    Prediction of this anchor box will incur all three parts of losses, and
    prediction of anchor boxes with no GT box matched will only incur objectness
    loss.

    In order to trade off box coordinate losses between big boxes and small
    boxes, box coordinate losses will be multiplied by a scale weight, which is
    calculated as follows.

    $$
    weight_{box} = 2.0 - t_w * t_h
    $$

    Final loss will be represented as follows.

    $$
    loss = (loss_{xy} + loss_{wh}) * weight_{box} + loss_{conf} + loss_{class}
    $$

    When :attr:`use_label_smooth` is set to :attr:`True`, the classification
    target will be smoothed when calculating classification loss, the target of
    positive samples will be smoothed to :math:`1.0 - 1.0 / class\_num` and the target of
    negative samples will be smoothed to :math:`1.0 / class\_num`.
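
    A one-line sketch of that smoothing rule (plain Python, for illustration only):

    .. code-block:: python

        def smooth_targets(class_num):
            # returns (positive_target, negative_target) after label smoothing
            return 1.0 - 1.0 / class_num, 1.0 / class_num

        assert smooth_targets(2) == (0.5, 0.5)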

    When :attr:`gt_score` is given, which means the mixup score of the ground truth
    boxes, all losses incurred by a ground truth box will be multiplied by its
    mixup score.

    Args:
        x (Tensor): The input tensor of the YOLOv3 loss operator. This is a 4-D
                      tensor with shape of [N, C, H, W]. H and W should be same,
                      and the second dimension(C) stores box locations, confidence
                      score and classification one-hot keys of each anchor box.
                      The data type is float32 or float64.
        gt_box (Tensor): ground truth boxes, should be in shape of [N, B, 4],
                          in the third dimension, x, y, w, h should be stored.
                          x,y is the center coordinate of boxes, w, h are the
                          width and height, x, y, w, h should be divided by
                          input image height to scale to [0, 1].
                          N is the batch number and B is the max box number in
                          an image. The data type is float32 or float64.
        gt_label (Tensor): class id of ground truth boxes, should be in shape
                            of [N, B]. The data type is int32.
        anchors (list|tuple): The anchor width and height, it will be parsed
                              pair by pair.
        anchor_mask (list|tuple): The mask index of anchors used in current
                                  YOLOv3 loss calculation.
        class_num (int): The number of classes.
        ignore_thresh (float): The ignore threshold to ignore confidence loss.
        downsample_ratio (int): The downsample ratio from network input to YOLOv3
                                loss input, so 32, 16, 8 should be set for the
                                first, second, and third YOLOv3 loss operators.
        name (str, optional): The default value is None. Normally there is no need
                       for the user to set this property. For more information,
                       please refer to :ref:`api_guide_Name`
        gt_score (Tensor): mixup score of ground truth boxes, should be in shape
                            of [N, B]. Default None.
        use_label_smooth (bool): Whether to use label smooth. Default True.
        scale_x_y (float): Scale the center point of decoded bounding box.
                           Default 1.0

    Returns:
        Tensor: A 1-D tensor with shape [N], the value of yolov3 loss

    Examples:
      .. code-block:: python

          import paddle

          x = paddle.rand([2, 14, 8, 8]).astype('float32')
          gt_box = paddle.rand([2, 10, 4]).astype('float32')
          gt_label = paddle.rand([2, 10]).astype('int32')


          loss = paddle.vision.ops.yolo_loss(x,
                                             gt_box=gt_box,
                                             gt_label=gt_label,
                                             anchors=[10, 13, 16, 30],
                                             anchor_mask=[0, 1],
                                             class_num=2,
                                             ignore_thresh=0.7,
                                             downsample_ratio=8,
                                             use_label_smooth=True,
                                             scale_x_y=1.)
    """
    if in_dygraph_mode():
        loss, _, _ = _C_ops.yolo_loss(
            x, gt_box, gt_label, gt_score, anchors, anchor_mask, class_num,
            ignore_thresh, downsample_ratio, use_label_smooth, scale_x_y)
        return loss

    if _in_legacy_dygraph():
        loss, _, _ = _legacy_C_ops.yolov3_loss(
            x, gt_box, gt_label, gt_score, 'anchors', anchors, 'anchor_mask',
            anchor_mask, 'class_num', class_num, 'ignore_thresh',
            ignore_thresh, 'downsample_ratio', downsample_ratio,
            'use_label_smooth', use_label_smooth, 'scale_x_y', scale_x_y)
        return loss

    helper = LayerHelper('yolov3_loss', **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'yolo_loss')
    check_variable_and_dtype(gt_box, 'gt_box', ['float32', 'float64'],
                             'yolo_loss')
    check_variable_and_dtype(gt_label, 'gt_label', 'int32', 'yolo_loss')
    check_type(anchors, 'anchors', (list, tuple), 'yolo_loss')
    check_type(anchor_mask, 'anchor_mask', (list, tuple), 'yolo_loss')
    check_type(class_num, 'class_num', int, 'yolo_loss')
    check_type(ignore_thresh, 'ignore_thresh', float, 'yolo_loss')
    check_type(use_label_smooth, 'use_label_smooth', bool, 'yolo_loss')

    loss = helper.create_variable_for_type_inference(dtype=x.dtype)
    objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
    gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')

    inputs = {"X": x, "GTBox": gt_box, "GTLabel": gt_label}
    if gt_score is not None:
        inputs["GTScore"] = gt_score

    attrs = {
        "anchors": anchors,
        "anchor_mask": anchor_mask,
        "class_num": class_num,
        "ignore_thresh": ignore_thresh,
        "downsample_ratio": downsample_ratio,
        "use_label_smooth": use_label_smooth,
        "scale_x_y": scale_x_y,
    }

    helper.append_op(type='yolov3_loss',
                     inputs=inputs,
                     outputs={
                         'Loss': loss,
                         'ObjectnessMask': objectness_mask,
                         'GTMatchMask': gt_match_mask,
                     },
                     attrs=attrs)
    return loss


def yolo_box(x,
             img_size,
             anchors,
             class_num,
             conf_thresh,
             downsample_ratio,
             clip_bbox=True,
             name=None,
             scale_x_y=1.0,
             iou_aware=False,
             iou_aware_factor=0.5):
    r"""

    This operator generates YOLO detection boxes from the output of a YOLOv3 network.
    
    The output of previous network is in shape [N, C, H, W], while H and W
    should be the same, H and W specify the grid size, and each grid point predicts
    a given number of boxes. This number, denoted below as S, is specified by the
    number of anchors. In the second (channel) dimension, C should be equal to
    S * (5 + class_num) if :attr:`iou_aware` is false, otherwise C should be equal
    to S * (6 + class_num), where class_num is the object category number of the
    source dataset (such as 80 in the COCO dataset). So, in the second (channel)
    dimension, apart from the 4 box location coordinates x, y, w, h, it also
    includes the confidence score of the box and the class one-hot key of each anchor
    box.

    Assume the 4 location coordinates are :math:`t_x, t_y, t_w, t_h`, the box 
    predictions should be as follows:

    $$
    b_x = \\sigma(t_x) + c_x
    $$
    $$
    b_y = \\sigma(t_y) + c_y
    $$
    $$
    b_w = p_w e^{t_w}
    $$
    $$
    b_h = p_h e^{t_h}
    $$

    In the equations above, :math:`c_x, c_y` is the top-left corner of the current
    grid cell and :math:`p_w, p_h` are specified by the anchors.

    The logistic regression value of the 5th channel of each anchor prediction box
    represents the confidence score of each prediction box, and the logistic
    regression value of the last :attr:`class_num` channels of each anchor prediction
    box represents the classification scores. Boxes with confidence scores less than
    :attr:`conf_thresh` should be ignored, and the final score of a box is the product of
    its confidence score and its classification score.

    $$
    score_{pred} = score_{conf} * score_{class}
    $$

    where the confidence score follows the formula below

    .. math::

        score_{conf} = \begin{cases}
                         obj, & \text{if } iou\_aware == false \\
                         obj^{1 - iou\_aware\_factor} * iou^{iou\_aware\_factor}, & \text{otherwise}
                       \end{cases}
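
    A small Python sketch of this scoring rule (a hypothetical helper, not
    part of the Paddle API):

    .. code-block:: python

        def conf_score(obj, iou, iou_aware=False, iou_aware_factor=0.5):
            if not iou_aware:
                return obj
            return obj ** (1 - iou_aware_factor) * iou ** iou_aware_factor

        # final score of one box for one class: confidence * class score
        score_pred = conf_score(0.9, 0.8, iou_aware=True) * 0.7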

    Args:
        x (Tensor): The input tensor of YoloBox operator is a 4-D tensor with
                      shape of [N, C, H, W]. The second dimension(C) stores box
                      locations, confidence score and classification one-hot keys
                      of each anchor box. Generally, X should be the output of
                      YOLOv3 network. The data type is float32 or float64. 
        img_size (Tensor): The image size tensor of the YoloBox operator. This is a
                           2-D tensor with shape of [N, 2]. This tensor holds
                           height and width of each input image used for resizing
                           output box in input image scale. The data type is int32. 
        anchors (list|tuple): The anchor width and height, it will be parsed pair
                              by pair.
        class_num (int): The number of classes.
        conf_thresh (float): The confidence scores threshold of detection boxes.
                             Boxes with confidence scores under threshold should
                             be ignored.
        downsample_ratio (int): The downsample ratio from network input to
                                :attr:`yolo_box` operator input, so 32, 16, 8
                                should be set for the first, second, and third
                                :attr:`yolo_box` layer.
        clip_bbox (bool): Whether to clip the output bounding boxes to the
                          :attr:`img_size` boundary. Default: True.
        scale_x_y (float): Scale the center point of decoded bounding box.
                           Default 1.0
        name (str, optional): The default value is None. Normally there is no need
                       for the user to set this property. For more information,
                       please refer to :ref:`api_guide_Name`
        iou_aware (bool): Whether to use IoU-aware confidence. Default: False.
        iou_aware_factor (float): The IoU-aware factor. Default: 0.5.

    Returns:
        Tensor: A 3-D tensor with shape [N, M, 4], the coordinates of boxes,
        and a 3-D tensor with shape [N, M, :attr:`class_num`], the classification 
        scores of boxes.

    Examples:

    .. code-block:: python

        import paddle

        x = paddle.rand([2, 14, 8, 8]).astype('float32')
        img_size = paddle.ones((2, 2)).astype('int32')

        boxes, scores = paddle.vision.ops.yolo_box(x,
                                                   img_size=img_size,
                                                   anchors=[10, 13, 16, 30],
                                                   class_num=2,
                                                   conf_thresh=0.01,
                                                   downsample_ratio=8,
                                                   clip_bbox=True,
                                                   scale_x_y=1.)
    """
    if in_dygraph_mode():
        boxes, scores = _C_ops.yolo_box(x, img_size, anchors, class_num,
                                        conf_thresh, downsample_ratio,
                                        clip_bbox, scale_x_y, iou_aware,
                                        iou_aware_factor)
        return boxes, scores

    if _in_legacy_dygraph():
        boxes, scores = _legacy_C_ops.yolo_box(
            x, img_size, 'anchors', anchors, 'class_num', class_num,
            'conf_thresh', conf_thresh, 'downsample_ratio', downsample_ratio,
            'clip_bbox', clip_bbox, 'scale_x_y', scale_x_y, 'iou_aware',
            iou_aware, 'iou_aware_factor', iou_aware_factor)
        return boxes, scores

    helper = LayerHelper('yolo_box', **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'yolo_box')
    check_variable_and_dtype(img_size, 'img_size', 'int32', 'yolo_box')
    check_type(anchors, 'anchors', (list, tuple), 'yolo_box')
    check_type(conf_thresh, 'conf_thresh', float, 'yolo_box')

    boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
    scores = helper.create_variable_for_type_inference(dtype=x.dtype)

    attrs = {
        "anchors": anchors,
        "class_num": class_num,
        "conf_thresh": conf_thresh,
        "downsample_ratio": downsample_ratio,
        "clip_bbox": clip_bbox,
        "scale_x_y": scale_x_y,
        "iou_aware": iou_aware,
        "iou_aware_factor": iou_aware_factor,
    }

    helper.append_op(type='yolo_box',
                     inputs={"X": x, "ImgSize": img_size},
                     outputs={"Boxes": boxes, "Scores": scores},
                     attrs=attrs)
    return boxes, scores


def prior_box(input,
              image,
              min_sizes,
              max_sizes=None,
              aspect_ratios=[1.],
              variance=[0.1, 0.1, 0.2, 0.2],
              flip=False,
              clip=False,
              steps=[0.0, 0.0],
              offset=0.5,
              min_max_aspect_ratios_order=False,
              name=None):
    r"""

    This op generates prior boxes for SSD(Single Shot MultiBox Detector) algorithm.

    Each position of the input produces N prior boxes, where N is determined by
    the count of min_sizes, max_sizes and aspect_ratios. The size of each box
    lies in the interval (min_size, max_size), and the boxes are generated in
    sequence according to the aspect_ratios.
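
    For instance, with the default ``aspect_ratios=[1.]``, no ``max_sizes`` and no
    flipping, each position yields one box per ``min_size`` (a sketch of the
    expected output shape under those assumptions):

    .. code-block:: python

        import paddle

        input = paddle.rand((1, 3, 6, 9))
        image = paddle.rand((1, 3, 9, 12))
        box, var = paddle.vision.ops.prior_box(input, image, min_sizes=[2.0, 4.0])
        print(box.shape)  # expected: [6, 9, 2, 4]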

    Args:
       input (Tensor): 4-D tensor(NCHW), the data type should be float32 or float64.
       image (Tensor): 4-D tensor(NCHW), the input image data of PriorBoxOp,
            the data type should be float32 or float64.
       min_sizes (list|tuple|float): the min sizes of generated prior boxes.
       max_sizes (list|tuple|None, optional): the max sizes of generated prior boxes.
            Default: None.
       aspect_ratios (list|tuple|float, optional): the aspect ratios of generated
            prior boxes. Default: [1.].
       variance (list|tuple, optional): the variances to be encoded in prior boxes.
            Default:[0.1, 0.1, 0.2, 0.2].
       flip (bool): Whether to flip aspect ratios. Default:False.
       clip (bool): Whether to clip out-of-boundary boxes. Default: False.
       steps (list|tuple, optional): Prior box steps across width and height. If
            steps[0] equals 0.0 or steps[1] equals 0.0, the prior box steps across
            the height or width of the input will be automatically calculated.
            Default: [0., 0.]
       offset (float, optional): Prior box center offset. Default: 0.5
       min_max_aspect_ratios_order (bool, optional): If set True, the output prior box is
            in order of [min, max, aspect_ratios], which is consistent with
            Caffe. Please note, this order affects the weights order of
            convolution layer followed by and does not affect the final
            detection results. Default: False.
       name (str, optional): The default value is None. Normally there is no need for
            the user to set this property. For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Tensor: the output prior boxes and the expanded variances of PriorBox.
            The prior boxes are a 4-D tensor, the layout is [H, W, num_priors, 4],
            num_priors is the total box count at each position of the input.
            The expanded variances are a 4-D tensor with the same shape as the prior boxes.

    Examples:
        .. code-block:: python

            import paddle

            input = paddle.rand((1, 3, 6, 9), dtype=paddle.float32)
            image = paddle.rand((1, 3, 9, 12), dtype=paddle.float32)

            box, var = paddle.vision.ops.prior_box(
                input=input,
                image=image,
                min_sizes=[2.0, 4.0],
                clip=True,
                flip=True)
    """
    helper = LayerHelper("prior_box", **locals())
    dtype = helper.input_dtype()
    check_variable_and_dtype(input, 'input',
                             ['uint8', 'int8', 'float32', 'float64'],
                             'prior_box')

    def _is_list_or_tuple_(data):
        return isinstance(data, list) or isinstance(data, tuple)

    if not _is_list_or_tuple_(min_sizes):
        min_sizes = [min_sizes]
    if not _is_list_or_tuple_(aspect_ratios):
        aspect_ratios = [aspect_ratios]
    if not _is_list_or_tuple_(steps):
        steps = [steps]
    if len(steps) != 2:
        raise ValueError("steps should be (step_w, step_h)")

    min_sizes = list(map(float, min_sizes))
    aspect_ratios = list(map(float, aspect_ratios))
    steps = list(map(float, steps))

    cur_max_sizes = None
    if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
        if not _is_list_or_tuple_(max_sizes):
            max_sizes = [max_sizes]
        cur_max_sizes = max_sizes

    if in_dygraph_mode():
        step_w, step_h = steps
        if max_sizes is None:
            max_sizes = []
        box, var = _C_ops.prior_box(input, image, min_sizes, aspect_ratios,
                                    variance, max_sizes, flip, clip, step_w,
                                    step_h, offset,
                                    min_max_aspect_ratios_order)
        return box, var

    if _in_legacy_dygraph():
        attrs = ('min_sizes', min_sizes, 'aspect_ratios', aspect_ratios,
                 'variances', variance, 'flip', flip, 'clip', clip, 'step_w',
                 steps[0], 'step_h', steps[1], 'offset', offset,
                 'min_max_aspect_ratios_order', min_max_aspect_ratios_order)
        if cur_max_sizes is not None:
            attrs += ('max_sizes', cur_max_sizes)
        box, var = _legacy_C_ops.prior_box(input, image, *attrs)
        return box, var

    attrs = {
        'min_sizes': min_sizes,
        'aspect_ratios': aspect_ratios,
        'variances': variance,
        'flip': flip,
        'clip': clip,
        'step_w': steps[0],
        'step_h': steps[1],
        'offset': offset,
        'min_max_aspect_ratios_order': min_max_aspect_ratios_order,
    }
    if cur_max_sizes is not None:
        attrs['max_sizes'] = cur_max_sizes

    box = helper.create_variable_for_type_inference(dtype)
    var = helper.create_variable_for_type_inference(dtype)
    helper.append_op(type="prior_box",
                     inputs={"Input": input, "Image": image},
                     outputs={"Boxes": box, "Variances": var},
                     attrs=attrs)
    box.stop_gradient = True
    var.stop_gradient = True
    return box, var


def box_coder(prior_box,
              prior_box_var,
              target_box,
              code_type="encode_center_size",
              box_normalized=True,
              axis=0,
              name=None):
    r"""
    Encode/Decode the target bounding box with the priorbox information.

    The Encoding schema described below:

    .. math::

        ox &= (tx - px) / pw / pxv

        oy &= (ty - py) / ph / pyv

        ow &= log(abs(tw / pw)) / pwv

        oh &= log(abs(th / ph)) / phv

    The Decoding schema described below:

    .. math::

        ox &= (pw * pxv * tx + px) - tw / 2

        oy &= (ph * pyv * ty + py) - th / 2

        ow &= exp(pwv * tw) * pw + tw / 2

        oh &= exp(phv * th) * ph + th / 2

    where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates,
    width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote
    the priorbox's (anchor) center coordinates, width and height. `pxv`,
    `pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`,
    `ow`, `oh` denote the encoded/decoded coordinates, width and height.
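
    A minimal NumPy sketch of the encoding schema above for one target box and
    one prior box (a hypothetical helper, not part of the Paddle API):

    .. code-block:: python

        import numpy as np

        def encode_center_size(target, prior, prior_var):
            # all arguments are [center_x, center_y, w, h] arrays
            tx, ty, tw, th = target
            px, py, pw, ph = prior
            pxv, pyv, pwv, phv = prior_var
            ox = (tx - px) / pw / pxv
            oy = (ty - py) / ph / pyv
            ow = np.log(np.abs(tw / pw)) / pwv
            oh = np.log(np.abs(th / ph)) / phv
            return np.array([ox, oy, ow, oh])
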
    During Box Decoding, two modes for broadcast are supported. Say target
    box has shape [N, M, 4], and the shape of prior box can be [N, 4] or
    [M, 4]. Then prior box will broadcast to target box along the
    assigned axis.

    Args:
        prior_box (Tensor): Box list prior_box is a 2-D Tensor with shape
            [M, 4] holds M boxes and data type is float32 or float64. Each box
            is represented as [xmin, ymin, xmax, ymax], [xmin, ymin] is the
            left top coordinate of the anchor box, if the input is image feature
            map, they are close to the origin of the coordinate system.
            [xmax, ymax] is the right bottom coordinate of the anchor box.
        prior_box_var (List|Tensor|None): prior_box_var supports three types
            of input. One is Tensor with shape [M, 4] which holds M group and
            data type is float32 or float64. The second is list consist of
            4 elements shared by all boxes and data type is float32 or float64.
            Other is None and not involved in calculation.
        target_box (Tensor): This input can be a 2-D LoDTensor with shape
            [N, 4] when code_type is 'encode_center_size'. This input also can
            be a 3-D Tensor with shape [N, M, 4] when code_type is
            'decode_center_size'. Each box is represented as
            [xmin, ymin, xmax, ymax]. The data type is float32 or float64.
        code_type (str, optional): The code type used with the target box. It can be
            `encode_center_size` or `decode_center_size`. `encode_center_size`
            by default.
        box_normalized (bool, optional): Whether treat the priorbox as a normalized box.
            Set true by default.
        axis (int, optional): Which axis in PriorBox to broadcast for box decode,
            for example, if axis is 0 and TargetBox has shape [N, M, 4] and
            PriorBox has shape [M, 4], then PriorBox will broadcast to [N, M, 4]
            for decoding. It is only valid when code type is
            `decode_center_size`. Set 0 by default.
        name (str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.

    Returns:
        Tensor: output boxes, when code_type is 'encode_center_size', the
            output tensor of box_coder_op with shape [N, M, 4] representing the
            result of N target boxes encoded with M Prior boxes and variances.
            When code_type is 'decode_center_size', N represents the batch size
            and M represents the number of decoded boxes.

    Examples:
        .. code-block:: python

            import paddle

            # For encode
            prior_box_encode = paddle.rand((80, 4), dtype=paddle.float32)
            prior_box_var_encode = paddle.rand((80, 4), dtype=paddle.float32)
            target_box_encode = paddle.rand((20, 4), dtype=paddle.float32)
            output_encode = paddle.vision.ops.box_coder(
                prior_box=prior_box_encode,
                prior_box_var=prior_box_var_encode,
                target_box=target_box_encode,
                code_type="encode_center_size")

            # For decode
            prior_box_decode = paddle.rand((80, 4), dtype=paddle.float32)
            prior_box_var_decode = paddle.rand((80, 4), dtype=paddle.float32)
            target_box_decode = paddle.rand((20, 80, 4), dtype=paddle.float32)
            output_decode = paddle.vision.ops.box_coder(
                prior_box=prior_box_decode,
                prior_box_var=prior_box_var_decode,
                target_box=target_box_decode,
                code_type="decode_center_size",
                box_normalized=False)
    """
    check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
                             'box_coder')
    check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
                             'box_coder')

    if in_dygraph_mode():
        if isinstance(prior_box_var, Variable):
            output_box = _C_ops.box_coder(prior_box, prior_box_var,
                                          target_box, code_type,
                                          box_normalized, axis, [])
        elif isinstance(prior_box_var, list):
            output_box = _C_ops.box_coder(prior_box, None, target_box,
                                          code_type, box_normalized, axis,
                                          prior_box_var)
        else:
            raise TypeError("Input prior_box_var must be Variable or list")
        return output_box

    if _in_legacy_dygraph():
        if isinstance(prior_box_var, Variable):
            output_box = _legacy_C_ops.box_coder(prior_box, prior_box_var,
                                                 target_box, "code_type",
                                                 code_type, "box_normalized",
                                                 box_normalized, "axis", axis)
        elif isinstance(prior_box_var, list):
            output_box = _legacy_C_ops.box_coder(prior_box, None, target_box,
                                                 "code_type", code_type,
                                                 "box_normalized",
                                                 box_normalized, "axis", axis,
                                                 "variance", prior_box_var)
        else:
            raise TypeError("Input prior_box_var must be Variable or list")
        return output_box

    helper = LayerHelper("box_coder", **locals())
    output_box = helper.create_variable_for_type_inference(
        dtype=prior_box.dtype)
    inputs = {"PriorBox": prior_box, "TargetBox": target_box}
    attrs = {
        "code_type": code_type,
        "box_normalized": box_normalized,
        "axis": axis,
    }
    if isinstance(prior_box_var, Variable):
        inputs['PriorBoxVar'] = prior_box_var
    elif isinstance(prior_box_var, list):
        attrs['variance'] = prior_box_var
    else:
        raise TypeError("Input prior_box_var must be Variable or list")
    helper.append_op(type="box_coder",
                     inputs=inputs,
                     outputs={"OutputBox": output_box},
                     attrs=attrs)
    return output_box


def deform_conv2d(x,
                  offset,
                  weight,
                  bias=None,
                  stride=1,
                  padding=0,
                  dilation=1,
                  deformable_groups=1,
                  groups=1,
                  mask=None,
                  name=None):
    r"""
    Compute 2-D deformable convolution on 4-D input.
    Given input image x, output feature map y, the deformable convolution operation can be expressed as follow:


    Deformable Convolution v2:

    .. math::

        y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k) * \Delta m_k}

    Deformable Convolution v1:

    .. math::

        y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k)}

    Where :math:`\Delta p_k` and :math:`\Delta m_k` are the learnable offset and modulation scalar for the k-th location,
    in which :math:`\Delta m_k` is 1 in deformable convolution v1. Please refer to `Deformable ConvNets v2: More Deformable, Better Results
    <https://arxiv.org/abs/1811.11168v2>`_ and `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_.

    Example:
        - Input:

          x shape: :math:`(N, C_{in}, H_{in}, W_{in})`

          weight shape: :math:`(C_{out}, C_{in}, H_f, W_f)`

          offset shape: :math:`(N, 2 * H_f * W_f, H_{out}, W_{out})`

          mask shape: :math:`(N, H_f * W_f, H_{out}, W_{out})`

        - Output:

          Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`

        Where

        .. math::

            H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\
            W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1
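
        For example, the output spatial size can be computed with this small
        helper (plain Python, mirroring the formulas above):

        .. code-block:: python

            def conv_out_size(in_size, padding, dilation, kernel, stride):
                return (in_size + 2 * padding
                        - (dilation * (kernel - 1) + 1)) // stride + 1

            # a 28 x 28 input with a 3 x 3 kernel, stride 1 and no padding
            # gives the 26 x 26 output used in the examples below
            assert conv_out_size(28, 0, 1, 3, 1) == 26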

    Args:
        x (Tensor): The input image with [N, C, H, W] format. A Tensor with type
            float32, float64.
        offset (Tensor): The input coordinate offset of deformable convolution layer.
            A Tensor with type float32, float64.
        weight (Tensor): The convolution kernel with shape [M, C/g, kH, kW], where M is
            the number of output channels, g is the number of groups, kH is the filter's
            height, kW is the filter's width.
        bias (Tensor, optional): The bias with shape [M,].
        stride (int|list|tuple, optional): The stride size. If stride is a list/tuple, it must
            contain two integers, (stride_H, stride_W). Otherwise, the
            stride_H = stride_W = stride. Default: stride = 1.
        padding (int|list|tuple, optional): The padding size. If padding is a list/tuple, it must
            contain two integers, (padding_H, padding_W). Otherwise, the
            padding_H = padding_W = padding. Default: padding = 0.
        dilation (int|list|tuple, optional): The dilation size. If dilation is a list/tuple, it must
            contain two integers, (dilation_H, dilation_W). Otherwise, the
            dilation_H = dilation_W = dilation. Default: dilation = 1.
        deformable_groups (int): The number of deformable group partitions.
            Default: deformable_groups = 1.
        groups (int, optional): The groups number of the deformable conv layer. According to
            grouped convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
            the first half of the filters is only connected to the first half
            of the input channels, while the second half of the filters is only
            connected to the second half of the input channels. Default: groups=1.
        mask (Tensor, optional): The input mask of deformable convolution layer.
            A Tensor with type float32, float64. It should be None when you use
            deformable convolution v1.
        name(str, optional): For details, please refer to :ref:`api_guide_Name`.
                        Generally, no setting is required. Default: None.
    Returns:
        Tensor: The tensor variable storing the deformable convolution \
                  result. A Tensor with type float32, float64.

    Examples:
        .. code-block:: python

          #deformable conv v2:

          import paddle
          input = paddle.rand((8, 1, 28, 28))
          kh, kw = 3, 3
          weight = paddle.rand((16, 1, kh, kw))
          # offset shape should be [bs, 2 * kh * kw, out_h, out_w]
          # mask shape should be [bs, kh * kw, out_h, out_w]
          # In this case, for an input of 28, stride of 1
          # and kernel size of 3, without padding, the output size is 26
          offset = paddle.rand((8, 2 * kh * kw, 26, 26))
          mask = paddle.rand((8, kh * kw, 26, 26))
          out = paddle.vision.ops.deform_conv2d(input, offset, weight, mask=mask)
          print(out.shape)
          # returns
          [8, 16, 26, 26]

          #deformable conv v1:

          import paddle
          input = paddle.rand((8, 1, 28, 28))
          kh, kw = 3, 3
          weight = paddle.rand((16, 1, kh, kw))
          # offset shape should be [bs, 2 * kh * kw, out_h, out_w]
          # In this case, for an input of 28, stride of 1
          # and kernel size of 3, without padding, the output size is 26
          offset = paddle.rand((8, 2 * kh * kw, 26, 26))
          out = paddle.vision.ops.deform_conv2d(input, offset, weight)
          print(out.shape)
          # returns
          [8, 16, 26, 26]
    """
    stride = utils.convert_to_list(stride, 2, 'stride')
    padding = utils.convert_to_list(padding, 2, 'padding')
    dilation = utils.convert_to_list(dilation, 2, 'dilation')

    use_deform_conv2d_v1 = True if mask is None else False

    if in_dygraph_mode():
        pre_bias = _C_ops.deformable_conv(x, offset, weight, mask, stride,
                                          padding, dilation,
                                          deformable_groups, groups, 1)
        if bias is not None:
            out = nn.elementwise_add(pre_bias, bias, axis=1)
        else:
            out = pre_bias
    elif _in_legacy_dygraph():
        attrs = ('strides', stride, 'paddings', padding, 'dilations',
                 dilation, 'deformable_groups', deformable_groups, 'groups',
                 groups, 'im2col_step', 1)
        if use_deform_conv2d_v1:
            op_type = 'deformable_conv_v1'
            pre_bias = getattr(_legacy_C_ops, op_type)(x, offset, weight,
                                                       *attrs)
        else:
            op_type = 'deformable_conv'
            pre_bias = getattr(_legacy_C_ops, op_type)(x, offset, mask,
                                                       weight, *attrs)
        if bias is not None:
            out = nn.elementwise_add(pre_bias, bias, axis=1)
        else:
            out = pre_bias
    else:
        check_variable_and_dtype(x, "x", ['float32', 'float64'],
                                 'deform_conv2d')
        check_variable_and_dtype(offset, "offset", ['float32', 'float64'],
                                 'deform_conv2d')

        num_channels = x.shape[1]
        helper = LayerHelper('deformable_conv', **locals())
        dtype = helper.input_dtype()

        pre_bias = helper.create_variable_for_type_inference(dtype)

        if use_deform_conv2d_v1:
            op_type = 'deformable_conv_v1'
            inputs = {'Input': x, 'Filter': weight, 'Offset': offset}
        else:
            op_type = 'deformable_conv'
            inputs = {
                'Input': x,
                'Filter': weight,
                'Offset': offset,
                'Mask': mask,
            }

        attrs = {
            'strides': stride,
            'paddings': padding,
            'dilations': dilation,
            'groups': groups,
            'deformable_groups': deformable_groups,
            'im2col_step': 1,
        }
        helper.append_op(type=op_type,
                         inputs=inputs,
                         outputs={'Output': pre_bias},
                         attrs=attrs)

        if bias is not None:
            out = helper.create_variable_for_type_inference(dtype)
            helper.append_op(type='elementwise_add',
                             inputs={'X': [pre_bias], 'Y': [bias]},
                             outputs={'Out': [out]},
                             attrs={'axis': 1})
        else:
            out = pre_bias
    return out


class DeformConv2D(Layer):
    r"""
    Compute 2-D deformable convolution on 4-D input.
    Given input image x, output feature map y, the deformable convolution operation can be expressed as follow:


    Deformable Convolution v2:

    .. math::

        y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k) * \Delta m_k}

    Deformable Convolution v1:

    .. math::

        y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k)}

    Where :math:`\Delta p_k` and :math:`\Delta m_k` are the learnable offset and modulation scalar for the k-th location,
    in which :math:`\Delta m_k` is 1 in deformable convolution v1. Please refer to `Deformable ConvNets v2: More Deformable, Better Results
    <https://arxiv.org/abs/1811.11168v2>`_ and `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_.

    Example:
        - Input:

          x shape: :math:`(N, C_{in}, H_{in}, W_{in})`

          weight shape: :math:`(C_{out}, C_{in}, H_f, W_f)`

          offset shape: :math:`(N, 2 * H_f * W_f, H_{out}, W_{out})`

          mask shape: :math:`(N, H_f * W_f, H_{out}, W_{out})`

        - Output:

          Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`

        Where

        .. math::

            H_{out}&= \frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\
            W_{out}&= \frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1


    Parameters:
        in_channels(int): The number of input channels in the input image.
        out_channels(int): The number of output channels produced by the convolution.
        kernel_size(int|list|tuple): The size of the convolving kernel.
        stride(int|list|tuple, optional): The stride size. If stride is a list/tuple, it must
            contain two integers, (stride_H, stride_W). Otherwise, the
            stride_H = stride_W = stride. The default value is 1.
        padding (int|list|tuple, optional): The padding size. If padding is a list/tuple, it must
            contain two integers, (padding_H, padding_W). Otherwise, the
            padding_H = padding_W = padding. Default: padding = 0.
        dilation(int|list|tuple, optional): The dilation size. If dilation is a list/tuple, it must
            contain two integers, (dilation_H, dilation_W). Otherwise, the
            dilation_H = dilation_W = dilation. The default value is 1.
        deformable_groups (int): The number of deformable group partitions.
            Default: deformable_groups = 1.
        groups(int, optional): The groups number of the deformable conv layer. According to grouped
            convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
            the first half of the filters is only connected to the first half
            of the input channels, while the second half of the filters is only
            connected to the second half of the input channels. The default value is 1.
        weight_attr(ParamAttr, optional): The parameter attribute for learnable parameters/weights
            of conv2d. If it is set to None or one attribute of ParamAttr, conv2d
            will create ParamAttr as param_attr. If it is set to None, the parameter
            is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is
            :math:`(\frac{2.0 }{filter\_elem\_num})^{0.5}`. The default value is None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of conv2d.
            If it is set to False, no bias will be added to the output units.
            If it is set to None or one attribute of ParamAttr, conv2d
            will create ParamAttr as bias_attr. If the Initializer of the bias_attr
            is not set, the bias is initialized zero. The default value is None.
    Attribute:
        **weight** (Parameter): the learnable weights of filter of this layer.
        **bias** (Parameter or None): the learnable bias of this layer.
    Shape:
        - x: :math:`(N, C_{in}, H_{in}, W_{in})`
        - offset: :math:`(N, 2 * H_f * W_f, H_{out}, W_{out})`
        - mask: :math:`(N, H_f * W_f, H_{out}, W_{out})`
        - output: :math:`(N, C_{out}, H_{out}, W_{out})`
        
        Where
        
        ..  math::

            H_{out}&= \frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (kernel\_size[0] - 1) + 1))}{strides[0]} + 1 \\
            W_{out}&= \frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (kernel\_size[1] - 1) + 1))}{strides[1]} + 1

    Examples:
        .. code-block:: python

          #deformable conv v2:

          import paddle
          input = paddle.rand((8, 1, 28, 28))
          kh, kw = 3, 3
          # offset shape should be [bs, 2 * kh * kw, out_h, out_w]
          # mask shape should be [bs, kh * kw, out_h, out_w]
          # In this case, for an input of 28, stride of 1
          # and kernel size of 3, without padding, the output size is 26
          offset = paddle.rand((8, 2 * kh * kw, 26, 26))
          mask = paddle.rand((8, kh * kw, 26, 26))
          deform_conv = paddle.vision.ops.DeformConv2D(
              in_channels=1,
              out_channels=16,
              kernel_size=[kh, kw])
          out = deform_conv(input, offset, mask)
          print(out.shape)
          # returns
          [8, 16, 26, 26]

          #deformable conv v1:

          import paddle
          input = paddle.rand((8, 1, 28, 28))
          kh, kw = 3, 3
          # offset shape should be [bs, 2 * kh * kw, out_h, out_w]
          # mask shape should be [bs, kh * kw, out_h, out_w]
          # In this case, for an input of 28, stride of 1
          # and kernel size of 3, without padding, the output size is 26
          offset = paddle.rand((8, 2 * kh * kw, 26, 26))
          deform_conv = paddle.vision.ops.DeformConv2D(
              in_channels=1,
              out_channels=16,
              kernel_size=[kh, kw])
          out = deform_conv(input, offset)
          print(out.shape)
          # returns
          [8, 16, 26, 26]
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 deformable_groups=1,
                 groups=1,
                 weight_attr=None,
                 bias_attr=None):
        super(DeformConv2D, self).__init__()
        assert weight_attr is not False, "weight_attr should not be False in Conv."
        self._weight_attr = weight_attr
        self._bias_attr = bias_attr
        self._deformable_groups = deformable_groups
        self._groups = groups
        self._in_channels = in_channels
        self._out_channels = out_channels
        self._channel_dim = 1

        self._stride = utils.convert_to_list(stride, 2, 'stride')
        self._dilation = utils.convert_to_list(dilation, 2, 'dilation')
        self._kernel_size = utils.convert_to_list(kernel_size, 2,
                                                  'kernel_size')

        if in_channels % groups != 0:
            raise ValueError("in_channels must be divisible by groups.")

        self._padding = utils.convert_to_list(padding, 2, 'padding')

        filter_shape = [out_channels, in_channels // groups
                        ] + self._kernel_size

        def _get_default_param_initializer():
            filter_elem_num = np.prod(self._kernel_size) * self._in_channels
            std = (2.0 / filter_elem_num)**0.5
            return Normal(0.0, std)

        self.weight = self.create_parameter(
            shape=filter_shape,
            attr=self._weight_attr,
            default_initializer=_get_default_param_initializer())
        self.bias = self.create_parameter(attr=self._bias_attr,
                                          shape=[self._out_channels],
                                          is_bias=True)

    def forward(self, x, offset, mask=None):
        out = deform_conv2d(x=x,
                            offset=offset,
                            weight=self.weight,
                            bias=self.bias,
                            stride=self._stride,
                            padding=self._padding,
                            dilation=self._dilation,
                            deformable_groups=self._deformable_groups,
                            groups=self._groups,
                            mask=mask)
        return out


def distribute_fpn_proposals(fpn_rois,
                             min_level,
                             max_level,
                             refer_level,
                             refer_scale,
                             pixel_offset=False,
                             rois_num=None,
                             name=None):
    r"""
    In Feature Pyramid Networks (FPN) models, it is needed to distribute
    all proposals into different FPN levels, with respect to the scale of the proposals,
    the referring scale and the referring level. Besides, to restore the order of
    proposals, we return an array which indicates the original index of the rois
    in the current proposals. To compute the FPN level for each roi, the formula is
    given as follows:

    .. math::

        roi\_scale &= \sqrt{BBoxArea(fpn\_roi)}

        level &= floor(\log(\frac{roi\_scale}{refer\_scale}) + refer\_level)

    where BBoxArea is a function to compute the area of each roi.
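
    A minimal NumPy sketch of this level assignment (a hypothetical helper, not
    part of the Paddle API; log base 2 as in the FPN paper):

    .. code-block:: python

        import numpy as np

        def fpn_level(roi, min_level, max_level, refer_level, refer_scale):
            # roi is [x1, y1, x2, y2]
            scale = np.sqrt((roi[2] - roi[0]) * (roi[3] - roi[1]))
            level = np.floor(np.log2(scale / refer_scale) + refer_level)
            return int(np.clip(level, min_level, max_level))

        assert fpn_level([0, 0, 224, 224], 2, 5, 4, 224) == 4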

    Args:
        fpn_rois (Tensor): The input fpn_rois. 2-D Tensor with shape [N, 4] and data type can be
            float32 or float64.
        min_level (int): The lowest level of FPN layer where the proposals come
            from.
        max_level (int): The highest level of FPN layer where the proposals
            come from.
        refer_level (int): The referring level of FPN layer with specified scale.
        refer_scale (int): The referring scale of FPN layer with specified level.
        pixel_offset (bool, optional): Whether there is pixel offset. If True, the offset of
            image shape will be 1. 'False' by default.
        rois_num (Tensor, optional): 1-D Tensor contains the number of RoIs in each image.
            The shape is [B] and data type is int32. B is the number of images.
            If rois_num is not None, it will return a list of 1-D Tensors. Each element
            is the output RoIs' number of each image on the corresponding level
            and the shape is [B]. None by default.
        name (str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.

    Returns:
        multi_rois (List) : The proposals in each FPN level. It is a list of 2-D Tensors with shape [M, 4],
            where M is the number of proposals in that level and the data type is the same as `fpn_rois` . The length of the list is max_level-min_level+1.
        restore_ind (Tensor): The index used to restore the order of fpn_rois. It is a 2-D Tensor with shape [N, 1]
            , where N is the number of total rois. The data type is int32.
        rois_num_per_level (List): A list of 1-D Tensor and each Tensor is
            the RoIs' number in each image on the corresponding level. The shape
            is [B] and the data type is int32, where B is the number of images.

    Examples:
        .. code-block:: python

            import paddle

            fpn_rois = paddle.rand((10, 4))
            rois_num = paddle.to_tensor([3, 1, 4, 2], dtype=paddle.int32)

            multi_rois, restore_ind, rois_num_per_level = paddle.vision.ops.distribute_fpn_proposals(
                fpn_rois=fpn_rois,
                min_level=2,
                max_level=5,
                refer_level=4,
                refer_scale=224,
                rois_num=rois_num)
    """
    num_lvl = max_level - min_level + 1

    if in_dygraph_mode():
        assert rois_num is not None, "rois_num should not be None in dygraph mode."
        multi_rois, rois_num_per_level, restore_ind = _C_ops.distribute_fpn_proposals(
            fpn_rois, rois_num, min_level, max_level, refer_level,
            refer_scale, pixel_offset)
        return multi_rois, restore_ind, rois_num_per_level

    if _in_legacy_dygraph():
        assert rois_num is not None, "rois_num should not be None in dygraph mode."
        attrs = ('min_level', min_level, 'max_level', max_level,
                 'refer_level', refer_level, 'refer_scale', refer_scale,
                 'pixel_offset', pixel_offset)
        multi_rois, restore_ind, rois_num_per_level = _legacy_C_ops.distribute_fpn_proposals(
            fpn_rois, rois_num, num_lvl, *attrs)
        return multi_rois, restore_ind, rois_num_per_level

    check_variable_and_dtype(fpn_rois, 'fpn_rois', ['float32', 'float64'],
                             'distribute_fpn_proposals')
    helper = LayerHelper('distribute_fpn_proposals', **locals())
    dtype = helper.input_dtype('fpn_rois')
    multi_rois = [
        helper.create_variable_for_type_inference(dtype)
        for i in range(num_lvl)
    ]
    restore_ind = helper.create_variable_for_type_inference(dtype='int32')

    inputs = {'FpnRois': fpn_rois}
    outputs = {'MultiFpnRois': multi_rois, 'RestoreIndex': restore_ind}
    if rois_num is not None:
        inputs['RoisNum'] = rois_num
        rois_num_per_level = [
            helper.create_variable_for_type_inference(dtype='int32')
            for i in range(num_lvl)
        ]
        outputs['MultiLevelRoIsNum'] = rois_num_per_level
    else:
        rois_num_per_level = None

    helper.append_op(type='distribute_fpn_proposals',
                     inputs=inputs,
                     outputs=outputs,
                     attrs={
                         'min_level': min_level,
                         'max_level': max_level,
                         'refer_level': refer_level,
                         'refer_scale': refer_scale,
                         'pixel_offset': pixel_offset,
                     })
    return multi_rois, restore_ind, rois_num_per_level


def read_file(filename, name=None):
    r"""
    Reads and outputs the bytes contents of a file as a uint8 Tensor
    with one dimension.

    Args:
        filename (str): Path of the file to be read.
        name (str, optional): The default value is None. Normally there is no
            need for the user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        A uint8 tensor.

    Examples:
        .. code-block:: python

            import cv2
            import paddle

            fake_img = (paddle.rand((400, 300, 3)).numpy() * 255).astype('uint8')

            cv2.imwrite('fake.jpg', fake_img)

            img_bytes = paddle.vision.ops.read_file('fake.jpg')

            print(img_bytes.shape)
            # [142915]
    """
    if _non_static_mode():
        return _legacy_C_ops.read_file('filename', filename)

    inputs = dict()
    attrs = {'filename': filename}
    helper = LayerHelper("read_file", **locals())
    out = helper.create_variable_for_type_inference('uint8')
    helper.append_op(type="read_file",
                     inputs=inputs,
                     attrs=attrs,
                     outputs={"Out": out})
    return out


def decode_jpeg(x, mode='unchanged', name=None):
    r"""
    Decodes a JPEG image into a 3 dimensional RGB Tensor or 1 dimensional Gray Tensor.
    Optionally converts the image to the desired format.
    The values of the output tensor are uint8 between 0 and 255.

    Args:
        x (Tensor): A one dimensional uint8 tensor containing the raw bytes
            of the JPEG image.
        mode (str): The read mode used for optionally converting the image.
            Default: 'unchanged'.
        name (str, optional): The default value is None. Normally there is no
            need for the user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.
    Returns:
        Tensor: A decoded image tensor with shape (image_channels, image_height, image_width)

    Examples:
        .. code-block:: python

            # required: gpu
            import cv2
            import numpy as np
            import paddle

            fake_img = (np.random.random(
                        (400, 300, 3)) * 255).astype('uint8')

            cv2.imwrite('fake.jpg', fake_img)

            img_bytes = paddle.vision.ops.read_file('fake.jpg')
            img = paddle.vision.ops.decode_jpeg(img_bytes)

            print(img.shape)
    moder;   r!   r\   r   r|   N)r!   )r   r   r!   r   rA   rG   rH   )r3   r   rI   r>   r@   rK   r   rL   rL   rM   r!     s   #
r!   c                 C   s   t |dtttfd t|tr||f}|\}}t| jdks"J dt| jd ||  }t r;t	| ||||||S t
 rMt	| ||d|d|d|d	|S tdi t }	|	 }
|	|
}|	jd| |d
d|i||||dd |S )al  
    Position sensitive region of interest pooling (also known as PSROIPooling) is to perform
    position-sensitive average pooling on regions of interest specified by the input. It operates
    on inputs of nonuniform sizes to obtain fixed-size feature maps.

    PSROIPooling is proposed by R-FCN. Please refer to https://arxiv.org/abs/1605.06409 for more details.
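
    The number of output channels follows directly from the input channels and the
    pooled size; a one-line sketch of that relation (assuming C is evenly divisible):

    .. code-block:: python

        # C input channels pooled into (h, w) bins give C // (h * w) output channels
        output_channels = lambda C, h, w: C // (h * w)
        assert output_channels(490, 7, 7) == 10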

    Args:
        x (Tensor): Input features with shape (N, C, H, W). The data type can be float32 or float64.
        boxes (Tensor): Box coordinates of ROIs (Regions of Interest) to pool over. It should be
                         a 2-D Tensor with shape (num_rois, 4). Given as [[x1, y1, x2, y2], ...],
                         (x1, y1) is the top left coordinates, and (x2, y2) is the bottom
                         right coordinates.
        boxes_num (Tensor): The number of boxes contained in each picture in the batch.
        output_size (int|Tuple(int, int)): The pooled output size (H, W), the data type
                               is int32. If int, H and W are both equal to output_size.
        spatial_scale (float, optional): Multiplicative spatial scale factor to translate ROI coords from their
                               input scale to the scale used when pooling. Default: 1.0
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`

    Returns:
        4-D Tensor. The pooled ROIs with shape (num_rois, output_channels, pooled_h, pooled_w).
        The output_channels equal to C / (pooled_h * pooled_w), where C is the channels of input.

    Examples:
        .. code-block:: python

            import paddle
            x = paddle.uniform([2, 490, 28, 28], dtype='float32')
            boxes = paddle.to_tensor([[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]], dtype='float32')
            boxes_num = paddle.to_tensor([1, 2], dtype='int32')
            pool_out = paddle.vision.ops.psroi_pool(x, boxes, boxes_num, 7, 1.0)
            print(pool_out.shape)
            # [3, 10, 7, 7]
    """
    check_type(output_size, 'output_size', (int, tuple, list), 'psroi_pool')
    if isinstance(output_size, int):
        output_size = (output_size, output_size)
    pooled_height, pooled_width = output_size
    assert len(x.shape) == 4, "Input features with shape should be (N, C, H, W)"
    output_channels = int(x.shape[1] / (pooled_height * pooled_width))

    if in_dygraph_mode():
        return _C_ops.psroi_pool(x, boxes, boxes_num, pooled_height,
                                 pooled_width, output_channels, spatial_scale)
    if _in_legacy_dygraph():
        return _legacy_C_ops.psroi_pool(x, boxes, boxes_num,
                                        "output_channels", output_channels,
                                        "spatial_scale", spatial_scale,
                                        "pooled_height", pooled_height,
                                        "pooled_width", pooled_width)

    helper = LayerHelper('psroi_pool', **locals())
    dtype = helper.input_dtype()
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(type='psroi_pool',
                     inputs={'X': x, 'ROIs': boxes},
                     outputs={'Out': out},
                     attrs={
                         'output_channels': output_channels,
                         'spatial_scale': spatial_scale,
                         'pooled_height': pooled_height,
                         'pooled_width': pooled_width,
                     })
    return out


class PSRoIPool(Layer):
    r"""
    This interface is used to construct a callable object of the ``PSRoIPool`` class. Please
    refer to :ref:`api_paddle_vision_ops_psroi_pool`.

    Args:
        output_size (int|Tuple(int, int)): The pooled output size (H, W), the data type
                               is int32. If int, H and W are both equal to output_size.
        spatial_scale (float, optional): Multiplicative spatial scale factor to translate ROI coords from their
                               input scale to the scale used when pooling. Default: 1.0.

    Shape:
        - x: 4-D Tensor with shape (N, C, H, W).
        - boxes: 2-D Tensor with shape (num_rois, 4).
        - boxes_num: 1-D Tensor.
        - output: 4-D tensor with shape (num_rois, output_channels, pooled_h, pooled_w).
              The output_channels equal to C / (pooled_h * pooled_w), where C is the channels of input.

    Returns:
        None.

    Examples:
        .. code-block:: python

            import paddle

            psroi_module = paddle.vision.ops.PSRoIPool(7, 1.0)
            x = paddle.uniform([2, 490, 28, 28], dtype='float32')
            boxes = paddle.to_tensor([[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]], dtype='float32')
            boxes_num = paddle.to_tensor([1, 2], dtype='int32')
            pool_out = psroi_module(x, boxes, boxes_num)
            print(pool_out.shape) # [3, 10, 7, 7]
    """

    def __init__(self, output_size, spatial_scale=1.0):
        super(PSRoIPool, self).__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale

    def forward(self, x, boxes, boxes_num):
        return psroi_pool(x, boxes, boxes_num, self.output_size,
                          self.spatial_scale)


def roi_pool(x, boxes, boxes_num, output_size, spatial_scale=1.0, name=None):
    r"""
    This operator implements the roi_pooling layer.
    Region of interest pooling (also known as RoI pooling) is to perform max pooling on inputs of nonuniform sizes to obtain fixed-size feature maps (e.g. 7*7).
    The operator has three steps: 1. Dividing each region proposal into equal-sized sections with output_size(h, w); 2. Finding the largest value in each section; 3. Copying these max values to the output buffer.
    For more information, please refer to https://stackoverflow.com/questions/43430056/what-is-roi-layer-in-fast-rcnn.
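
    A minimal NumPy sketch of these three steps for a single RoI on a
    single-channel feature map (a hypothetical helper, not part of the Paddle API):

    .. code-block:: python

        import numpy as np

        def roi_max_pool(feat, roi, out_h, out_w):
            # feat: [H, W]; roi: [x1, y1, x2, y2] in feature-map coordinates
            x1, y1, x2, y2 = roi
            ys = np.linspace(y1, y2, out_h + 1).astype(int)
            xs = np.linspace(x1, x2, out_w + 1).astype(int)
            out = np.zeros((out_h, out_w), feat.dtype)
            for i in range(out_h):
                for j in range(out_w):
                    # each section takes the max over its (possibly uneven) bin
                    out[i, j] = feat[ys[i]:max(ys[i + 1], ys[i] + 1),
                                     xs[j]:max(xs[j + 1], xs[j] + 1)].max()
            return out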

    Args:
        x (Tensor): input feature, 4D-Tensor with the shape of [N,C,H,W],
            where N is the batch size, C is the input channel, H is the height, and W is the width.
            The data type is float32 or float64.
        boxes (Tensor): boxes (Regions of Interest) to pool over.
            2D-Tensor with the shape of [num_boxes,4].
            Given as [[x1, y1, x2, y2], ...], (x1, y1) is the top left coordinates,
            and (x2, y2) is the bottom right coordinates.
        boxes_num (Tensor): the number of RoIs in each image, data type is int32. Default: None
        output_size (int or tuple[int, int]): the pooled output size(h, w), data type is int32. If int, h and w are both equal to output_size.
        spatial_scale (float, optional): multiplicative spatial scale factor to translate ROI coords from their input scale to the scale used when pooling. Default: 1.0
        name(str, optional): for detailed information, please refer to :ref:`api_guide_Name`. Usually name is no need to set and None by default.

    Returns:
        pool_out (Tensor): the pooled feature, 4D-Tensor with the shape of [num_boxes, C, output_size[0], output_size[1]].

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.ops import roi_pool

            data = paddle.rand([1, 256, 32, 32])
            boxes = paddle.rand([3, 4])
            boxes[:, 2] += boxes[:, 0] + 3
            boxes[:, 3] += boxes[:, 1] + 4
            boxes_num = paddle.to_tensor([3]).astype('int32')
            pool_out = roi_pool(data, boxes, boxes_num=boxes_num, output_size=3)
            assert pool_out.shape == [3, 256, 3, 3], ''
    """
    check_type(output_size, 'output_size', (int, tuple), 'roi_pool')
    if isinstance(output_size, int):
        output_size = (output_size, output_size)
    pooled_height, pooled_width = output_size

    if in_dygraph_mode():
        assert boxes_num is not None, "boxes_num should not be None in dygraph mode."
        return _C_ops.roi_pool(x, boxes, boxes_num, pooled_height,
                               pooled_width, spatial_scale)
    if _in_legacy_dygraph():
        assert boxes_num is not None, "boxes_num should not be None in dygraph mode."
        pool_out, argmaxes = _legacy_C_ops.roi_pool(
            x, boxes, boxes_num, "pooled_height", pooled_height,
            "pooled_width", pooled_width, "spatial_scale", spatial_scale)
        return pool_out

    check_variable_and_dtype(x, 'x', ['float32'], 'roi_pool')
    check_variable_and_dtype(boxes, 'boxes', ['float32'], 'roi_pool')
    helper = LayerHelper('roi_pool', **locals())
    dtype = helper.input_dtype()
    pool_out = helper.create_variable_for_type_inference(dtype)
    argmaxes = helper.create_variable_for_type_inference(dtype='int32')

    inputs = {"X": x, "ROIs": boxes}
    if boxes_num is not None:
        inputs['RoisNum'] = boxes_num
    helper.append_op(type="roi_pool",
                     inputs=inputs,
                     outputs={"Out": pool_out, "Argmax": argmaxes},
                     attrs={
                         "pooled_height": pooled_height,
                         "pooled_width": pooled_width,
                         "spatial_scale": spatial_scale,
                     })
    return pool_out


class RoIPool(Layer):
    r"""
    This interface is used to construct a callable object of the `RoIPool` class. Please
    refer to :ref:`api_paddle_vision_ops_roi_pool`.

    Args:
        output_size (int or tuple[int, int]): the pooled output size(h, w), data type is int32. If int, h and w are both equal to output_size.
        spatial_scale (float, optional): multiplicative spatial scale factor to translate ROI coords from their input scale to the scale used when pooling. Default: 1.0.

    Returns:
        pool_out (Tensor): the pooled feature, 4D-Tensor with the shape of [num_boxes, C, output_size[0], output_size[1]].

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.ops import RoIPool

            data = paddle.rand([1, 256, 32, 32])
            boxes = paddle.rand([3, 4])
            boxes[:, 2] += boxes[:, 0] + 3
            boxes[:, 3] += boxes[:, 1] + 4
            boxes_num = paddle.to_tensor([3]).astype('int32')
            roi_pool = RoIPool(output_size=(4, 3))
            pool_out = roi_pool(data, boxes, boxes_num)
            assert pool_out.shape == [3, 256, 4, 3], ''
    """

    def __init__(self, output_size, spatial_scale=1.0):
        super(RoIPool, self).__init__()
        self._output_size = output_size
        self._spatial_scale = spatial_scale

    def forward(self, x, boxes, boxes_num):
        return roi_pool(x=x,
                        boxes=boxes,
                        boxes_num=boxes_num,
                        output_size=self._output_size,
                        spatial_scale=self._spatial_scale)

    def extra_repr(self):
        main_str = 'output_size={_output_size}, spatial_scale={_spatial_scale}'
        return main_str.format(**self.__dict__)


def roi_align(x,
              boxes,
              boxes_num,
              output_size,
              spatial_scale=1.0,
              sampling_ratio=-1,
              aligned=True,
              name=None):
    r"""
    Implementing the roi_align layer.
    Region of Interest (RoI) Align operator (also known as RoI Align) is to
    perform bilinear interpolation on inputs of nonuniform sizes to obtain
    fixed-size feature maps (e.g. 7*7), as described in Mask R-CNN.

    Dividing each region proposal into equal-sized sections with the pooled_width
    and pooled_height, the sampling locations keep their original (non-quantized) positions.

    In each RoI bin, the values of the four regularly sampled locations are
    computed directly through bilinear interpolation. The output is the mean of the
    four locations, which avoids the misalignment problem.
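
    A minimal NumPy sketch of the bilinear sampling used per location
    (a hypothetical helper, not part of the Paddle API):

    .. code-block:: python

        import numpy as np

        def bilinear(feat, y, x):
            # feat: [H, W]; (y, x) is a fractional sampling location
            y0, x0 = int(np.floor(y)), int(np.floor(x))
            y1 = min(y0 + 1, feat.shape[0] - 1)
            x1 = min(x0 + 1, feat.shape[1] - 1)
            ly, lx = y - y0, x - x0
            return (feat[y0, x0] * (1 - ly) * (1 - lx)
                    + feat[y0, x1] * (1 - ly) * lx
                    + feat[y1, x0] * ly * (1 - lx)
                    + feat[y1, x1] * ly * lx)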

    Args:
        x (Tensor): Input feature, 4D-Tensor with the shape of [N,C,H,W],
            where N is the batch size, C is the input channel, H is Height,
            W is weight. The data type is float32 or float64.
        boxes (Tensor): Boxes (RoIs, Regions of Interest) to pool over. It
            should be a 2-D Tensor of shape (num_boxes, 4). The data type is
            float32 or float64. Given as [[x1, y1, x2, y2], ...], (x1, y1) is
            the top left coordinates, and (x2, y2) is the bottom right coordinates.
        boxes_num (Tensor): The number of boxes contained in each picture in
            the batch, the data type is int32.
        output_size (int or Tuple[int, int]): The pooled output size(h, w), data
            type is int32. If int, h and w are both equal to output_size.
        spatial_scale (float32, optional): Multiplicative spatial scale factor to translate
            ROI coords from their input scale to the scale used when pooling.
            Default: 1.0.
        sampling_ratio (int32, optional): number of sampling points in the interpolation
            grid used to compute the output value of each pooled output bin.
            If > 0, then exactly ``sampling_ratio x sampling_ratio`` sampling
            points per bin are used.
            If <= 0, then an adaptive number of grid points are used (computed
            as ``ceil(roi_width / output_width)``, and likewise for height).
            Default: -1.
        aligned (bool, optional): If False, use the legacy implementation. If True, pixel
            shift the box coordinates it by -0.5 for a better alignment with the
            two neighboring pixel indices. This version is used in Detectron2.
            Default: True.
        name(str, optional): For detailed information, please refer to :
            ref:`api_guide_Name`. Usually name is no need to set and None by
            default.

    Returns:
        The output of ROIAlignOp is a 4-D tensor with shape (num_boxes,
            channels, pooled_h, pooled_w). The data type is float32 or float64.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.ops import roi_align

            data = paddle.rand([1, 256, 32, 32])
            boxes = paddle.rand([3, 4])
            boxes[:, 2] += boxes[:, 0] + 3
            boxes[:, 3] += boxes[:, 1] + 4
            boxes_num = paddle.to_tensor([3]).astype('int32')
            align_out = roi_align(data, boxes, boxes_num, output_size=3)
            assert align_out.shape == [3, 256, 3, 3]
    """
    # NOTE: the compiled body of this function was mangled in this dump; the
    # code below is an approximate reconstruction from the surviving symbol
    # names and the documented signature.
    check_type(output_size, 'output_size', (int, tuple), 'roi_align')
    if isinstance(output_size, int):
        output_size = (output_size, output_size)
    pooled_height, pooled_width = output_size
    if in_dygraph_mode():
        assert boxes_num is not None, "boxes_num should not be None in dygraph mode."
        return _C_ops.roi_align(x, boxes, boxes_num, pooled_height,
                                pooled_width, spatial_scale, sampling_ratio,
                                aligned)
    if _in_legacy_dygraph():
        assert boxes_num is not None, "boxes_num should not be None in dygraph mode."
        align_out = _legacy_C_ops.roi_align(
            x, boxes, boxes_num, "pooled_height", pooled_height,
            "pooled_width", pooled_width, "spatial_scale", spatial_scale,
            "sampling_ratio", sampling_ratio, "aligned", aligned)
        return align_out

    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'roi_align')
    check_variable_and_dtype(boxes, 'boxes', ['float32', 'float64'],
                             'roi_align')
    helper = LayerHelper('roi_align', **locals())
    dtype = helper.input_dtype()
    align_out = helper.create_variable_for_type_inference(dtype)
    inputs = {"X": x, "ROIs": boxes}
    if boxes_num is not None:
        inputs['RoisNum'] = boxes_num
    helper.append_op(type="roi_align",
                     inputs=inputs,
                     outputs={"Out": align_out},
                     attrs={
                         "pooled_height": pooled_height,
                         "pooled_width": pooled_width,
                         "spatial_scale": spatial_scale,
                         "sampling_ratio": sampling_ratio,
                         "aligned": aligned,
                     })
    return align_out


class RoIAlign(Layer):
    """
    This interface is used to construct a callable object of the `RoIAlign` class.
    Please refer to :ref:`api_paddle_vision_ops_roi_align`.

    Args:
        output_size (int or tuple[int, int]): The pooled output size (h, w),
            data type is int32. If int, h and w are both equal to output_size.
        spatial_scale (float32, optional): Multiplicative spatial scale factor
            to translate ROI coords from their input scale to the scale used
            when pooling. Default: 1.0

    Returns:
        The output of ROIAlign operator is a 4-D tensor with
            shape (num_boxes, channels, pooled_h, pooled_w).

    Examples:
        ..  code-block:: python

            import paddle
            from paddle.vision.ops import RoIAlign

            data = paddle.rand([1, 256, 32, 32])
            boxes = paddle.rand([3, 4])
            boxes[:, 2] += boxes[:, 0] + 3
            boxes[:, 3] += boxes[:, 1] + 4
            boxes_num = paddle.to_tensor([3]).astype('int32')
            roi_align = RoIAlign(output_size=(4, 3))
            align_out = roi_align(data, boxes, boxes_num)
            assert align_out.shape == [3, 256, 4, 3]
    """

    def __init__(self, output_size, spatial_scale=1.0):
        super(RoIAlign, self).__init__()
        self._output_size = output_size
        self._spatial_scale = spatial_scale

    def forward(self, x, boxes, boxes_num, aligned=True):
        return roi_align(x=x,
                         boxes=boxes,
                         boxes_num=boxes_num,
                         output_size=self._output_size,
                         spatial_scale=self._spatial_scale,
                         aligned=aligned)


class ConvNormActivation(Sequential):
    """
    Configurable block used for Convolution-Normalization-Activation blocks.
    This code is based on the torchvision code, with modifications.
    See https://github.com/pytorch/vision/blob/main/torchvision/ops/misc.py#L68
    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the Convolution-Normalization-Activation block
        kernel_size: (int|list|tuple, optional): Size of the convolving kernel. Default: 3
        stride (int|list|tuple, optional): Stride of the convolution. Default: 1
        padding (int|str|tuple|list, optional): Padding added to all four sides of the input. Default: None,
            in which case it will be calculated as ``padding = (kernel_size - 1) // 2 * dilation``
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        norm_layer (Callable[..., paddle.nn.Layer], optional): Norm layer that will be stacked on top of the convolution layer.
            If ``None`` this layer won't be used. Default: ``paddle.nn.BatchNorm2D``
        activation_layer (Callable[..., paddle.nn.Layer], optional): Activation function which will be stacked on top of the normalization
            layer (if not ``None``), otherwise on top of the conv layer. If ``None`` this layer won't be used. Default: ``paddle.nn.ReLU``
        dilation (int): Spacing between kernel elements. Default: 1
        bias (bool, optional): Whether to use bias in the convolution layer. By default, biases are included if ``norm_layer is None``.
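
    Examples:
        .. code-block:: python

            # A minimal usage sketch (an added example, not from the original
            # source): 3x3 conv -> BatchNorm2D -> ReLU with default settings.
            import paddle
            from paddle.vision.ops import ConvNormActivation

            x = paddle.rand([1, 3, 224, 224])
            block = ConvNormActivation(in_channels=3, out_channels=16)
            out = block(x)
            assert out.shape == [1, 16, 224, 224]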
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 stride=1,
                 padding=None,
                 groups=1,
                 norm_layer=BatchNorm2D,
                 activation_layer=ReLU,
                 dilation=1,
                 bias=None):
        if padding is None:
            padding = (kernel_size - 1) // 2 * dilation
        if bias is None:
            bias = norm_layer is None
        layers = [
            Conv2D(in_channels,
                   out_channels,
                   kernel_size,
                   stride,
                   padding,
                   dilation=dilation,
                   groups=groups,
                   bias_attr=bias)
        ]
        if norm_layer is not None:
            layers.append(norm_layer(out_channels))
        if activation_layer is not None:
            layers.append(activation_layer())
        super(ConvNormActivation, self).__init__(*layers)


def nms(boxes,
        iou_threshold=0.3,
        scores=None,
        category_idxs=None,
        categories=None,
        top_k=None):
    """
    This operator implements non-maximum suppression. Non-maximum suppression (NMS)
    is used to select one bounding box out of many overlapping bounding boxes in object detection.
    Boxes with IoU > iou_threshold will be considered as overlapping boxes,
    and only the one with the highest score is kept. Here IoU is Intersection Over Union,
    which can be computed by:

    ..  math::

        IoU = \frac{intersection\_area(box1, box2)}{union\_area(box1, box2)}
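
    For example, two axis-aligned unit squares whose intersection is a
    0.5 x 1 strip have ``IoU = 0.5 / (1 + 1 - 0.5) = 1/3``.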

    If scores are provided, input boxes will first be sorted by their scores.

    If category_idxs and categories are provided, NMS will be performed with a batched style,
    which means NMS will be applied to each category respectively and results of each category
    will be concatenated and sorted by scores.

    If top_k is provided, only the first top_k elements will be returned. Otherwise, all box indices sorted by scores will be returned.

    Args:
        boxes(Tensor): The input boxes data to be computed, it's a 2D-Tensor with
            the shape of [num_boxes, 4]. The data type is float32 or float64.
            Given as [[x1, y1, x2, y2], …],  (x1, y1) is the top left coordinates,
            and (x2, y2) is the bottom right coordinates.
            Their relation should be ``0 <= x1 < x2 && 0 <= y1 < y2``.
        iou_threshold(float32, optional): IoU threshold for determining overlapping boxes. Default value: 0.3.
        scores(Tensor, optional): Scores corresponding to boxes, it's a 1D-Tensor with
            shape of [num_boxes]. The data type is float32 or float64. Default: None.
        category_idxs(Tensor, optional): Category indices corresponding to boxes.
            It's a 1D-Tensor with shape of [num_boxes]. The data type is int64. Default: None.
        categories(List, optional): A list of unique id of all categories. The data type is int64. Default: None.
        top_k(int64, optional): The number of highest-scoring boxes kept by NMS
            to be returned. top_k should be smaller than or equal to num_boxes. Default: None.

    Returns:
        Tensor: 1D-Tensor with the shape of [num_boxes]. Indices of boxes kept by NMS.

    Examples:
        .. code-block:: python

            import paddle

            boxes = paddle.rand([4, 4]).astype('float32')
            boxes[:, 2] = boxes[:, 0] + boxes[:, 2]
            boxes[:, 3] = boxes[:, 1] + boxes[:, 3]
            print(boxes)
            # Tensor(shape=[4, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
            #        [[0.64811575, 0.89756244, 0.86473107, 1.48552322],
            #         [0.48085716, 0.84799081, 0.54517937, 0.86396021],
            #         [0.62646860, 0.72901905, 1.17392159, 1.69691563],
            #         [0.89729202, 0.46281594, 1.88733089, 0.98588502]])

            out = paddle.vision.ops.nms(boxes, 0.1)
            print(out)
            # Tensor(shape=[3], dtype=int64, place=Place(gpu:0), stop_gradient=True,
            #        [0, 1, 3])

            scores = paddle.to_tensor([0.6, 0.7, 0.4, 0.233])

            categories = [0, 1, 2, 3]
            category_idxs = paddle.to_tensor([2, 0, 0, 3], dtype="int64")

            out = paddle.vision.ops.nms(boxes,
                                        0.1,
                                        paddle.to_tensor(scores),
                                        paddle.to_tensor(category_idxs),
                                        categories,
                                        4)
            print(out)
            # Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
            #        [1, 0, 2, 3])
    """
    # NOTE: the compiled body of this function was mangled in this dump; the
    # code below is an approximate reconstruction from the surviving symbol
    # names and assertion strings.

    def _nms(boxes, iou_threshold):
        if in_dygraph_mode():
            return _C_ops.nms(boxes, iou_threshold)
        if _non_static_mode():
            return _legacy_C_ops.nms(boxes, 'iou_threshold', iou_threshold)
        helper = LayerHelper('nms', **locals())
        out = helper.create_variable_for_type_inference('int64')
        helper.append_op(type='nms',
                         inputs={'Boxes': boxes},
                         outputs={'KeepBoxesIdxs': out},
                         attrs={'iou_threshold': iou_threshold})
        return out

    if scores is None:
        return _nms(boxes, iou_threshold)

    import paddle

    if category_idxs is None:
        sorted_global_indices = paddle.argsort(scores, descending=True)
        sorted_keep_boxes_indices = _nms(boxes[sorted_global_indices],
                                         iou_threshold)
        return sorted_global_indices[sorted_keep_boxes_indices]

    if top_k is not None:
        assert top_k <= scores.shape[0], \
            "top_k should be smaller equal than the number of boxes"
    assert categories is not None, \
        "if category_idxs is given, categories which is a list of unique id of all categories is necessary"

    mask = paddle.zeros_like(scores, dtype=paddle.int32)
    for category_id in categories:
        cur_category_boxes_idxs = paddle.where(category_idxs == category_id)[0]
        shape = cur_category_boxes_idxs.shape[0]
        cur_category_boxes_idxs = paddle.reshape(cur_category_boxes_idxs,
                                                 [shape])
        if shape == 0:
            continue
        elif shape == 1:
            mask[cur_category_boxes_idxs] = 1
            continue
        cur_category_boxes = boxes[cur_category_boxes_idxs]
        cur_category_scores = scores[cur_category_boxes_idxs]
        cur_category_sorted_indices = paddle.argsort(cur_category_scores,
                                                     descending=True)
        cur_category_sorted_boxes = cur_category_boxes[
            cur_category_sorted_indices]
        cur_category_keep_boxes_sub_idxs = cur_category_sorted_indices[
            _nms(cur_category_sorted_boxes, iou_threshold)]
        updates = paddle.ones_like(
            cur_category_boxes_idxs[cur_category_keep_boxes_sub_idxs],
            dtype=paddle.int32)
        mask = paddle.scatter(
            mask,
            cur_category_boxes_idxs[cur_category_keep_boxes_sub_idxs],
            updates,
            overwrite=True)
    keep_boxes_idxs = paddle.where(mask == 1)[0]
    shape = keep_boxes_idxs.shape[0]
    keep_boxes_idxs = paddle.reshape(keep_boxes_idxs, [shape])
    sorted_sub_indices = paddle.argsort(scores[keep_boxes_idxs],
                                        descending=True)

    if top_k is None:
        return keep_boxes_idxs[sorted_sub_indices]

    if _non_static_mode():
        top_k = shape if shape < top_k else top_k
        _, topk_sub_indices = paddle.topk(scores[keep_boxes_idxs], top_k)
        return keep_boxes_idxs[topk_sub_indices]

    return keep_boxes_idxs[sorted_sub_indices][:top_k]


def generate_proposals(scores,
                       bbox_deltas,
                       img_size,
                       anchors,
                       variances,
                       pre_nms_top_n=6000,
                       post_nms_top_n=1000,
                       nms_thresh=0.5,
                       min_size=0.1,
                       eta=1.0,
                       pixel_offset=False,
                       return_rois_num=False,
                       name=None):
    """
    This operation proposes RoIs according to each box with their
    probability to be a foreground object. And
    the proposals of RPN output are  calculated by anchors, bbox_deltas and scores. Final proposals
    could be used to train detection net.

    For generating proposals, this operation performs the following steps:

    1. Transpose and reshape scores and bbox_deltas to the shapes
       (H * W * A, 1) and (H * W * A, 4) respectively.
    2. Calculate box locations as proposal candidates (the decoding is
       sketched below, after this list).
    3. Clip boxes to the image.
    4. Remove predicted boxes with small area.
    5. Apply non-maximum suppression (NMS) to get final proposals as output.
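
    As an illustration of step 2, a single proposal is decoded from its anchor
    and deltas roughly as follows (a simplified sketch of the usual R-CNN
    decoding, not the exact kernel):

    .. code-block:: python

        import numpy as np

        def decode(anchor, delta, variance):
            # anchor: (xmin, ymin, xmax, ymax); delta: (dx, dy, dw, dh)
            w = anchor[2] - anchor[0]
            h = anchor[3] - anchor[1]
            cx = anchor[0] + 0.5 * w
            cy = anchor[1] + 0.5 * h
            # shift the center and rescale the size
            pred_cx = cx + delta[0] * variance[0] * w
            pred_cy = cy + delta[1] * variance[1] * h
            pred_w = w * np.exp(delta[2] * variance[2])
            pred_h = h * np.exp(delta[3] * variance[3])
            return (pred_cx - 0.5 * pred_w, pred_cy - 0.5 * pred_h,
                    pred_cx + 0.5 * pred_w, pred_cy + 0.5 * pred_h)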

    Args:
        scores (Tensor): A 4-D Tensor with shape [N, A, H, W] represents
            the probability for each box to be an object.
            N is batch size, A is number of anchors, H and W are height and
            width of the feature map. The data type must be float32.
        bbox_deltas (Tensor): A 4-D Tensor with shape [N, 4*A, H, W]
            represents the difference between predicted box location and
            anchor location. The data type must be float32.
        img_size (Tensor): A 2-D Tensor with shape [N, 2] represents the original
            image shape for each of the N images in the batch, given as height and width.
            The data type can be float32 or float64.
        anchors (Tensor):   A 4-D Tensor represents the anchors with a layout
            of [H, W, A, 4]. H and W are height and width of the feature map,
            A is the number of anchors at each position. Each anchor is
            in (xmin, ymin, xmax, ymax) format and unnormalized. The data type must be float32.
        variances (Tensor): A 4-D Tensor. The expanded variances of anchors with a layout of
            [H, W, num_priors, 4]. Each variance is in
            (xcenter, ycenter, w, h) format. The data type must be float32.
        pre_nms_top_n (float, optional): Number of total bboxes to be kept per
            image before NMS. `6000` by default.
        post_nms_top_n (float, optional): Number of total bboxes to be kept per
            image after NMS. `1000` by default.
        nms_thresh (float, optional): Threshold in NMS. The data type must be float32. `0.5` by default.
        min_size (float, optional): Remove predicted boxes with either height or
            width less than this value. `0.1` by default.
        eta(float, optional): Apply in adaptive NMS, only works if adaptive `threshold > 0.5`,
            `adaptive_threshold = adaptive_threshold * eta` in each iteration. 1.0 by default.
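            For example, with `nms_thresh=0.6` and `eta=0.9`, the threshold
            decays to 0.54 after one iteration, and decaying stops once the
            threshold is no longer above 0.5.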
        pixel_offset (bool, optional): Whether there is pixel offset. If True, the offset of `img_size` will be 1. 'False' by default.
        return_rois_num (bool, optional): Whether to return `rpn_rois_num`. When set to True, a 1D Tensor with shape [N, ] is returned, containing
            the number of RoIs for each image in the batch. 'False' by default.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.

    Returns:
        - rpn_rois (Tensor): The generated RoIs. 2-D Tensor with shape ``[N, 4]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
        - rpn_roi_probs (Tensor): The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
        - rpn_rois_num (Tensor): Rois's num of each image in one batch. 1-D Tensor with shape ``[B,]`` while ``B`` is the batch size. And its sum equals to RoIs number ``N`` .

    Examples:
        .. code-block:: python

            import paddle

            scores = paddle.rand((2,4,5,5), dtype=paddle.float32)
            bbox_deltas = paddle.rand((2, 16, 5, 5), dtype=paddle.float32)
            img_size = paddle.to_tensor([[224.0, 224.0], [224.0, 224.0]])
            anchors = paddle.rand((2,5,4,4), dtype=paddle.float32)
            variances = paddle.rand((2,5,10,4), dtype=paddle.float32)
            rois, roi_probs, roi_nums = paddle.vision.ops.generate_proposals(scores, bbox_deltas,
                         img_size, anchors, variances, return_rois_num=True)
            print(rois, roi_probs, roi_nums)
    """
    # NOTE: the compiled body of this function was mangled in this dump; the
    # code below is an approximate reconstruction from the surviving symbol
    # names and assertion strings.
    if in_dygraph_mode():
        assert return_rois_num, "return_rois_num should be True in dygraph mode."
        attrs = (pre_nms_top_n, post_nms_top_n, nms_thresh, min_size, eta,
                 pixel_offset)
        rpn_rois, rpn_roi_probs, rpn_rois_num = _C_ops.generate_proposals_v2(
            scores, bbox_deltas, img_size, anchors, variances, *attrs)
        return rpn_rois, rpn_roi_probs, rpn_rois_num

    if _in_legacy_dygraph():
        assert return_rois_num, "return_rois_num should be True in dygraph mode."
        attrs = ('pre_nms_topN', pre_nms_top_n, 'post_nms_topN',
                 post_nms_top_n, 'nms_thresh', nms_thresh, 'min_size',
                 min_size, 'eta', eta, 'pixel_offset', pixel_offset)
        rpn_rois, rpn_roi_probs, rpn_rois_num = \
            _legacy_C_ops.generate_proposals_v2(scores, bbox_deltas, img_size,
                                                anchors, variances, *attrs)
        return rpn_rois, rpn_roi_probs, rpn_rois_num

    helper = LayerHelper('generate_proposals_v2', **locals())

    check_variable_and_dtype(scores, 'scores', ['float32'],
                             'generate_proposals_v2')
    check_variable_and_dtype(bbox_deltas, 'bbox_deltas', ['float32'],
                             'generate_proposals_v2')
    check_variable_and_dtype(img_size, 'img_size', ['float32', 'float64'],
                             'generate_proposals_v2')
    check_variable_and_dtype(anchors, 'anchors', ['float32'],
                             'generate_proposals_v2')
    check_variable_and_dtype(variances, 'variances', ['float32'],
                             'generate_proposals_v2')

    rpn_rois = helper.create_variable_for_type_inference(
        dtype=bbox_deltas.dtype)
    rpn_roi_probs = helper.create_variable_for_type_inference(
        dtype=scores.dtype)
    outputs = {'RpnRois': rpn_rois, 'RpnRoiProbs': rpn_roi_probs}
    if return_rois_num:
        rpn_rois_num = helper.create_variable_for_type_inference(dtype='int32')
        rpn_rois_num.stop_gradient = True
        outputs['RpnRoisNum'] = rpn_rois_num

    helper.append_op(type="generate_proposals_v2",
                     inputs={
                         'Scores': scores,
                         'BboxDeltas': bbox_deltas,
                         'ImShape': img_size,
                         'Anchors': anchors,
                         'Variances': variances
                     },
                     attrs={
                         'pre_nms_topN': pre_nms_top_n,
                         'post_nms_topN': post_nms_top_n,
                         'nms_thresh': nms_thresh,
                         'min_size': min_size,
                         'eta': eta,
                         'pixel_offset': pixel_offset
                     },
                     outputs=outputs)
    rpn_rois.stop_gradient = True
    rpn_roi_probs.stop_gradient = True
    if not return_rois_num:
        rpn_rois_num = None
    return rpn_rois, rpn_roi_probs, rpn_rois_num


def matrix_nms(bboxes,
               scores,
               score_threshold,
               post_threshold,
               nms_top_k,
               keep_top_k,
               use_gaussian=False,
               gaussian_sigma=2.0,
               background_label=0,
               normalized=True,
               return_index=False,
               return_rois_num=True,
               name=None):
    """
    This operator performs matrix non-maximum suppression (NMS).
    It first selects a subset of candidate bounding boxes that have higher scores
    than score_threshold (if provided), then the top k candidates are selected if
    nms_top_k is larger than -1. Scores of the remaining candidates are then
    decayed according to the Matrix NMS scheme.
    After the NMS step, at most keep_top_k bboxes in total are kept per image
    if keep_top_k is larger than -1.
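
    Following the Matrix NMS formulation (a sketch of the scheme, not the
    exact kernel), the decay applied to a candidate :math:`j` by
    higher-scoring candidates :math:`i` of the same class is

    .. math::

        decay_{j} = \min_{i} \frac{f(iou_{ij})}{f(iou_{i,max})}

    where :math:`f(iou) = e^{-iou^{2}/\sigma}` with :math:`\sigma =`
    ``gaussian_sigma`` for the Gaussian decay (``use_gaussian=True``) and
    :math:`f(iou) = 1 - iou` for the linear decay, and :math:`iou_{i,max}` is
    the largest IoU between :math:`i` and any candidate scoring higher than
    :math:`i`.
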
    Args:
        bboxes (Tensor): A 3-D Tensor with shape [N, M, 4] represents the
                           predicted locations of M bounding bboxes,
                           N is the batch size. Each bounding box has four
                           coordinate values and the layout is
                           [xmin, ymin, xmax, ymax], when box size equals to 4.
                           The data type is float32 or float64.
        scores (Tensor): A 3-D Tensor with shape [N, C, M]
                           represents the predicted confidence predictions.
                           N is the batch size, C is the class number, M is
                           the number of bounding boxes. For each category there
                           are M scores in total, corresponding to the M bounding
                           boxes. Please note, M is equal to the 2nd dimension
                           of BBoxes. The data type is float32 or float64.
        score_threshold (float): Threshold to filter out bounding boxes with
                                 low confidence score.
        post_threshold (float): Threshold to filter out bounding boxes with
                                low confidence score AFTER decaying.
        nms_top_k (int): Maximum number of detections to be kept according to
                         the confidences after the filtering detections based
                         on score_threshold.
        keep_top_k (int): Number of total bboxes to be kept per image after NMS
                          step. -1 means keeping all bboxes after NMS step.
        use_gaussian (bool): Use Gaussian as the decay function. Default: False
        gaussian_sigma (float): Sigma for Gaussian decay function. Default: 2.0
        background_label (int): The index of background label, the background
                                label will be ignored. If set to -1, then all
                                categories will be considered. Default: 0
        normalized (bool): Whether detections are normalized. Default: True
        return_index(bool): Whether to return the selected indices. Default: False
        return_rois_num(bool): Whether to return rois_num. Default: True
        name(str): Name of the matrix nms op. Default: None.

    Returns:
        A tuple of three Tensors: (Out, RoisNum, Index) if return_index is True,
        otherwise, a tuple of two Tensors (Out, RoisNum) is returned.
        Out (Tensor): A 2-D Tensor with shape [No, 6] containing the
             detection results.
             Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
        Index (Tensor): A 2-D Tensor with shape [No, 1] containing the
            selected indices, which are absolute indices across the whole batch.
        rois_num (Tensor): A 1-D Tensor with shape [N] containing
            the number of detected boxes in each image.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.ops import matrix_nms
            boxes = paddle.rand([4, 1, 4])
            boxes[..., 2] = boxes[..., 0] + boxes[..., 2]
            boxes[..., 3] = boxes[..., 1] + boxes[..., 3]
            scores = paddle.rand([4, 80, 1])
            out = matrix_nms(bboxes=boxes, scores=scores, background_label=0,
                                 score_threshold=0.5, post_threshold=0.1,
                                 nms_top_k=400, keep_top_k=200, normalized=False)
    """
    # NOTE: the compiled body of this function was mangled in this dump; the
    # code below is an approximate reconstruction from the surviving symbol
    # names (the C-op argument order in particular is a best guess).
    check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
                             'matrix_nms')
    check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
                             'matrix_nms')
    check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
    check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'matrix_nms')
    check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
    check_type(normalized, 'normalized', bool, 'matrix_nms')
    check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
    check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
    check_type(background_label, 'background_label', int, 'matrix_nms')

    if in_dygraph_mode():
        out, index, rois_num = _C_ops.matrix_nms(bboxes, scores,
                                                 score_threshold, nms_top_k,
                                                 keep_top_k, post_threshold,
                                                 use_gaussian, gaussian_sigma,
                                                 background_label, normalized)
        if not return_index:
            index = None
        if not return_rois_num:
            rois_num = None
        return out, rois_num, index

    if _non_static_mode():
        attrs = ('background_label', background_label, 'score_threshold',
                 score_threshold, 'post_threshold', post_threshold,
                 'nms_top_k', nms_top_k, 'gaussian_sigma', gaussian_sigma,
                 'use_gaussian', use_gaussian, 'keep_top_k', keep_top_k,
                 'normalized', normalized)
        out, index, rois_num = _legacy_C_ops.matrix_nms(bboxes, scores, *attrs)
        if not return_index:
            index = None
        if not return_rois_num:
            rois_num = None
        return out, rois_num, index

    helper = LayerHelper('matrix_nms', **locals())
    output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
    index = helper.create_variable_for_type_inference(dtype='int32')
    outputs = {'Out': output, 'Index': index}
    if return_rois_num:
        rois_num = helper.create_variable_for_type_inference(dtype='int32')
        outputs['RoisNum'] = rois_num
    helper.append_op(type='matrix_nms',
                     inputs={'BBoxes': bboxes, 'Scores': scores},
                     attrs={
                         'background_label': background_label,
                         'score_threshold': score_threshold,
                         'post_threshold': post_threshold,
                         'nms_top_k': nms_top_k,
                         'gaussian_sigma': gaussian_sigma,
                         'use_gaussian': use_gaussian,
                         'keep_top_k': keep_top_k,
                         'normalized': normalized
                     },
                     outputs=outputs)
    output.stop_gradient = True
    if not return_index:
        index = None
    if not return_rois_num:
        rois_num = None
    return output, rois_num, index




