from typing import List, Optional, Dict, Tuple

import torch
from torch import nn, Tensor
from torch.nn import functional as F
from torchvision.ops import Conv2dNormActivation
from torchvision.ops import boxes as box_ops

from . import _utils as det_utils
from .anchor_utils import AnchorGenerator
from .image_list import ImageList


class RPNHead(nn.Module):
    """
    Adds a simple RPN Head with classification and regression heads

    Args:
        in_channels (int): number of channels of the input feature
        num_anchors (int): number of anchors to be predicted
        conv_depth (int, optional): number of convolutions
    """

    _version = 2

    def __init__(self, in_channels: int, num_anchors: int, conv_depth=1) -> None:
        super().__init__()
        convs = []
        for _ in range(conv_depth):
            convs.append(Conv2dNormActivation(in_channels, in_channels, kernel_size=3, norm_layer=None))
        self.conv = nn.Sequential(*convs)
        self.cls_logits = nn.Conv2d(in_channels, num_anchors, kernel_size=1, stride=1)
        self.bbox_pred = nn.Conv2d(in_channels, num_anchors * 4, kernel_size=1, stride=1)

        for layer in self.modules():
            if isinstance(layer, nn.Conv2d):
                torch.nn.init.normal_(layer.weight, std=0.01)
                if layer.bias is not None:
                    torch.nn.init.constant_(layer.bias, 0)

    def _load_from_state_dict(
        self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
    ):
        version = local_metadata.get("version", None)

        if version is None or version < 2:
            # Checkpoints saved before the conv was wrapped in Conv2dNormActivation
            # stored the weights under "conv.*"; remap them to "conv.0.0.*".
            for type in ["weight", "bias"]:
                old_key = f"{prefix}conv.{type}"
                new_key = f"{prefix}conv.0.0.{type}"
                if old_key in state_dict:
                    state_dict[new_key] = state_dict.pop(old_key)

        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
        )

    def forward(self, x: List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]:
        logits = []
        bbox_reg = []
        for feature in x:
            t = self.conv(feature)
            logits.append(self.cls_logits(t))
            bbox_reg.append(self.bbox_pred(t))
        return logits, bbox_reg


def permute_and_flatten(layer: Tensor, N: int, A: int, C: int, H: int, W: int) -> Tensor:
    layer = layer.view(N, -1, C, H, W)
    layer = layer.permute(0, 3, 4, 1, 2)
    layer = layer.reshape(N, -1, C)
    return layer


def concat_box_prediction_layers(box_cls: List[Tensor], box_regression: List[Tensor]) -> Tuple[Tensor, Tensor]:
    box_cls_flattened = []
    box_regression_flattened = []
    # For each feature level, permute the outputs so that they share the same
    # (N, HWA, C) layout as the labels, which are computed for all levels concatenated.
    for box_cls_per_level, box_regression_per_level in zip(box_cls, box_regression):
        N, AxC, H, W = box_cls_per_level.shape
        Ax4 = box_regression_per_level.shape[1]
        A = Ax4 // 4
        C = AxC // A
        box_cls_per_level = permute_and_flatten(box_cls_per_level, N, A, C, H, W)
        box_cls_flattened.append(box_cls_per_level)

        box_regression_per_level = permute_and_flatten(box_regression_per_level, N, A, 4, H, W)
        box_regression_flattened.append(box_regression_per_level)
    # Concatenate on the level dimension, then flatten to match how the labels were generated.
    box_cls = torch.cat(box_cls_flattened, dim=1).flatten(0, -2)
    box_regression = torch.cat(box_regression_flattened, dim=1).reshape(-1, 4)
    return box_cls, box_regression


class RegionProposalNetwork(torch.nn.Module):
    """
    Implements Region Proposal Network (RPN).

    Args:
        anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature
            maps.
        head (nn.Module): module that computes the objectness and regression deltas
        fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be
            considered as positive during training of the RPN.
        bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be
            considered as negative during training of the RPN.
        batch_size_per_image (int): number of anchors that are sampled during training of the RPN
            for computing the loss
        positive_fraction (float): proportion of positive anchors in a mini-batch during training
            of the RPN
        pre_nms_top_n (Dict[str, int]): number of proposals to keep before applying NMS. It should
            contain two fields: training and testing, to allow for different values depending
            on training or evaluation
        post_nms_top_n (Dict[str, int]): number of proposals to keep after applying NMS. It should
            contain two fields: training and testing, to allow for different values depending
            on training or evaluation
        nms_thresh (float): NMS threshold used for postprocessing the RPN proposals

    """

    __annotations__ = {
        "box_coder": det_utils.BoxCoder,
        "proposal_matcher": det_utils.Matcher,
        "fg_bg_sampler": det_utils.BalancedPositiveNegativeSampler,
    }

    def __init__(
        self,
        anchor_generator: AnchorGenerator,
        head: nn.Module,
        # training parameters
        fg_iou_thresh: float,
        bg_iou_thresh: float,
        batch_size_per_image: int,
        positive_fraction: float,
        # inference parameters
        pre_nms_top_n: Dict[str, int],
        post_nms_top_n: Dict[str, int],
        nms_thresh: float,
        score_thresh: float = 0.0,
    ) -> None:
        super().__init__()
        self.anchor_generator = anchor_generator
        self.head = head
        self.box_coder = det_utils.BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))

        # used during training
        self.box_similarity = box_ops.box_iou
        self.proposal_matcher = det_utils.Matcher(fg_iou_thresh, bg_iou_thresh, allow_low_quality_matches=True)
        self.fg_bg_sampler = det_utils.BalancedPositiveNegativeSampler(batch_size_per_image, positive_fraction)

        # used during testing
        self._pre_nms_top_n = pre_nms_top_n
        self._post_nms_top_n = post_nms_top_n
        self.nms_thresh = nms_thresh
        self.score_thresh = score_thresh
        self.min_size = 1e-3

    def pre_nms_top_n(self) -> int:
        if self.training:
            return self._pre_nms_top_n["training"]
        return self._pre_nms_top_n["testing"]

    def post_nms_top_n(self) -> int:
        if self.training:
            return self._post_nms_top_n["training"]
        return self._post_nms_top_n["testing"]

    def assign_targets_to_anchors(
        self, anchors: List[Tensor], targets: List[Dict[str, Tensor]]
    ) -> Tuple[List[Tensor], List[Tensor]]:
        labels = []
        matched_gt_boxes = []
        for anchors_per_image, targets_per_image in zip(anchors, targets):
            gt_boxes = targets_per_image["boxes"]

            if gt_boxes.numel() == 0:
                # background image (no ground-truth boxes)
                device = anchors_per_image.device
                matched_gt_boxes_per_image = torch.zeros(anchors_per_image.shape, dtype=torch.float32, device=device)
                labels_per_image = torch.zeros((anchors_per_image.shape[0],), dtype=torch.float32, device=device)
            else:
                match_quality_matrix = self.box_similarity(gt_boxes, anchors_per_image)
                matched_idxs = self.proposal_matcher(match_quality_matrix)
                # clamp because matched_idxs can be negative for unmatched anchors
                matched_gt_boxes_per_image = gt_boxes[matched_idxs.clamp(min=0)]

                labels_per_image = matched_idxs >= 0
                labels_per_image = labels_per_image.to(dtype=torch.float32)

                # background (negative examples)
                bg_indices = matched_idxs == self.proposal_matcher.BELOW_LOW_THRESHOLD
                labels_per_image[bg_indices] = 0.0

                # discard anchors that fall between the thresholds
                inds_to_discard = matched_idxs == self.proposal_matcher.BETWEEN_THRESHOLDS
                labels_per_image[inds_to_discard] = -1.0

            labels.append(labels_per_image)
            matched_gt_boxes.append(matched_gt_boxes_per_image)
        return labels, matched_gt_boxes

    def _get_top_n_idx(self, objectness: Tensor, num_anchors_per_level: List[int]) -> Tensor:
        r = []
        offset = 0
        for ob in objectness.split(num_anchors_per_level, 1):
            num_anchors = ob.shape[1]
            pre_nms_top_n = det_utils._topk_min(ob, self.pre_nms_top_n(), 1)
            _, top_n_idx = ob.topk(pre_nms_top_n, dim=1)
            r.append(top_n_idx + offset)
            offset += num_anchors
        return torch.cat(r, dim=1)

    def filter_proposals(
        self,
        proposals: Tensor,
        objectness: Tensor,
        image_shapes: List[Tuple[int, int]],
        num_anchors_per_level: List[int],
    ) -> Tuple[List[Tensor], List[Tensor]]:
        num_images = proposals.shape[0]
        device = proposals.device
        # do not backprop through objectness
        objectness = objectness.detach()
        objectness = objectness.reshape(num_images, -1)

        levels = [
            torch.full((n,), idx, dtype=torch.int64, device=device) for idx, n in enumerate(num_anchors_per_level)
        ]
        levels = torch.cat(levels, 0)
        levels = levels.reshape(1, -1).expand_as(objectness)

        # select top_n boxes independently per level before applying nms
        top_n_idx = self._get_top_n_idx(objectness, num_anchors_per_level)

        image_range = torch.arange(num_images, device=device)
        batch_idx = image_range[:, None]

        objectness = objectness[batch_idx, top_n_idx]
        levels = levels[batch_idx, top_n_idx]
        proposals = proposals[batch_idx, top_n_idx]

        objectness_prob = torch.sigmoid(objectness)

        final_boxes = []
        final_scores = []
        for boxes, scores, lvl, img_shape in zip(proposals, objectness_prob, levels, image_shapes):
            boxes = box_ops.clip_boxes_to_image(boxes, img_shape)

            # remove small boxes
            keep = box_ops.remove_small_boxes(boxes, self.min_size)
            boxes, scores, lvl = boxes[keep], scores[keep], lvl[keep]

            # remove low scoring boxes
            keep = torch.where(scores >= self.score_thresh)[0]
            boxes, scores, lvl = boxes[keep], scores[keep], lvl[keep]

            # non-maximum suppression, done independently per level
            keep = box_ops.batched_nms(boxes, scores, lvl, self.nms_thresh)

            # keep only the top-k scoring predictions
            keep = keep[: self.post_nms_top_n()]
            boxes, scores = boxes[keep], scores[keep]

            final_boxes.append(boxes)
            final_scores.append(scores)
        return final_boxes, final_scores

    def compute_loss(
        self, objectness: Tensor, pred_bbox_deltas: Tensor, labels: List[Tensor], regression_targets: List[Tensor]
    ) -> Tuple[Tensor, Tensor]:
        """
        Args:
            objectness (Tensor)
            pred_bbox_deltas (Tensor)
            labels (List[Tensor])
            regression_targets (List[Tensor])

        Returns:
            objectness_loss (Tensor)
            box_loss (Tensor)
        r   rG   gqq?sum)betaZ	reduction)	rQ   r    r   rK   rL   FZsmooth_l1_lossrq   Z binary_cross_entropy_with_logits)
        """
        sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
        sampled_pos_inds = torch.where(torch.cat(sampled_pos_inds, dim=0))[0]
        sampled_neg_inds = torch.where(torch.cat(sampled_neg_inds, dim=0))[0]

        sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)

        objectness = objectness.flatten()

        labels = torch.cat(labels, dim=0)
        regression_targets = torch.cat(regression_targets, dim=0)

        box_loss = F.smooth_l1_loss(
            pred_bbox_deltas[sampled_pos_inds],
            regression_targets[sampled_pos_inds],
            beta=1 / 9,
            reduction="sum",
        ) / (sampled_inds.numel())

        objectness_loss = F.binary_cross_entropy_with_logits(objectness[sampled_inds], labels[sampled_inds])

        return objectness_loss, box_loss

    def forward(
        self,
        images: ImageList,
        features: Dict[str, Tensor],
        targets: Optional[List[Dict[str, Tensor]]] = None,
    ) -> Tuple[List[Tensor], Dict[str, Tensor]]:
        """
        Args:
            images (ImageList): images for which we want to compute the predictions
            features (Dict[str, Tensor]): features computed from the images that are
                used for computing the predictions. Each tensor in the list
                corresponds to a different feature level
            targets (List[Dict[str, Tensor]]): ground-truth boxes present in the image (optional).
                If provided, each element in the dict should contain a field `boxes`,
                with the locations of the ground-truth boxes.

        Returns:
            boxes (List[Tensor]): the predicted boxes from the RPN, one Tensor per
                image.
            losses (Dict[str, Tensor]): the losses for the model during training. During
                testing, it is an empty dict.
        """
        # RPN uses all feature maps that are available
        features = list(features.values())
        objectness, pred_bbox_deltas = self.head(features)
        anchors = self.anchor_generator(images, features)

        num_images = len(anchors)
        num_anchors_per_level_shape_tensors = [o[0].shape for o in objectness]
        num_anchors_per_level = [s[0] * s[1] * s[2] for s in num_anchors_per_level_shape_tensors]
        objectness, pred_bbox_deltas = concat_box_prediction_layers(objectness, pred_bbox_deltas)
        # Apply the predicted deltas to the anchors to obtain the decoded proposals.
        # The deltas are detached because Faster R-CNN does not backprop through the proposals.
        proposals = self.box_coder.decode(pred_bbox_deltas.detach(), anchors)
        proposals = proposals.view(num_images, -1, 4)
        boxes, scores = self.filter_proposals(proposals, objectness, images.image_sizes, num_anchors_per_level)

        losses = {}
        if self.training:
            if targets is None:
                raise ValueError("targets should not be None")
            labels, matched_gt_boxes = self.assign_targets_to_anchors(anchors, targets)
            regression_targets = self.box_coder.encode(matched_gt_boxes, anchors)
            loss_objectness, loss_rpn_box_reg = self.compute_loss(
                objectness, pred_bbox_deltas, labels, regression_targets
            )
            losses = {
                "loss_objectness": loss_objectness,
                "loss_rpn_box_reg": loss_rpn_box_reg,
            }
        return boxes, losses
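
# Construction sketch (illustrative; the hyper-parameters below mirror common
# Faster R-CNN defaults but are assumptions here, not values defined in this file):
#
#   anchor_generator = AnchorGenerator(
#       sizes=((32,), (64,), (128,), (256,), (512,)),
#       aspect_ratios=((0.5, 1.0, 2.0),) * 5,
#   )
#   head = RPNHead(in_channels=256, num_anchors=anchor_generator.num_anchors_per_location()[0])
#   rpn = RegionProposalNetwork(
#       anchor_generator, head,
#       fg_iou_thresh=0.7, bg_iou_thresh=0.3,
#       batch_size_per_image=256, positive_fraction=0.5,
#       pre_nms_top_n=dict(training=2000, testing=1000),
#       post_nms_top_n=dict(training=2000, testing=1000),
#       nms_thresh=0.7,
#   )
#   # boxes, losses = rpn(image_list, features, targets)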