import paddle
import paddle.fluid.framework as framework
from paddle.distributed import collective


def _check_tensor_shape(tensor, shape, nranks=1):
    # Each rank receives a 1/nranks slice of dim 0 of the scattered tensor.
    expect_shape = list(shape)
    expect_shape[0] //= nranks
    if list(tensor.shape) != expect_shape:
        raise RuntimeError("The in_tensor for scatter is not correctly-sized.")


def _check_tensor_list_shape(tensor_list, shape, nranks=1):
    # The source rank must provide exactly one correctly-sized tensor per rank.
    if len(tensor_list) != nranks:
        raise RuntimeError("The tensor_list for scatter is not correctly-sized.")
    for tensor in tensor_list:
        if tensor.shape != shape:
            raise RuntimeError("The tensor_list for scatter is not correctly-sized.")


def _scatter_tensor_in_dygraph(
    out_tensor, in_tensor, src, group, sync_op, use_calc_stream
):
    group = collective._get_default_group() if group is None else group

    src_rank = group.get_group_rank(src)
    if src_rank == -1:
        raise RuntimeError("Src rank out of group.")

    nranks = group.nranks
    rank = paddle.distributed.get_rank()
    if rank == src_rank:
        _check_tensor_shape(out_tensor, in_tensor.shape, nranks)

    if use_calc_stream:
        return group.process_group.scatter_tensor_on_calc_stream(
            in_tensor, out_tensor, src_rank
        )

    task = group.process_group.scatter_tensor(in_tensor, out_tensor, src_rank, sync_op)
    if sync_op:
        task.wait()

    return task


def _scatter_in_dygraph(tensor, tensor_list, src, group, sync_op, use_calc_stream):
    group = collective._get_default_group() if group is None else group

    src_rank = group.get_group_rank(src)
    if src_rank == -1:
        raise RuntimeError("Src rank out of group.")

    nranks = group.nranks
    rank = paddle.distributed.get_rank()
    if rank == src_rank:
        if len(tensor_list) == 0:
            raise RuntimeError("The tensor_list should not be empty on src rank.")
        _check_tensor_list_shape(tensor_list, tensor.shape, nranks)
    else:
        # Non-src ranks only receive; fill the list with placeholders.
        tensor_list = [tensor for _ in range(nranks)]

    if use_calc_stream:
        return group.process_group.scatter_on_calc_stream(tensor_list, tensor, src_rank)

    task = group.process_group.scatter(tensor_list, tensor, src_rank, sync_op)
    if sync_op:
        task.wait()

    return task


def scatter(
    tensor,
    tensor_or_tensor_list=None,
    src=0,
    group=None,
    sync_op=True,
    use_calc_stream=False,
):
    """

    Scatter a tensor (or a tensor list) across devices.

    Args:
        tensor (Tensor): The output tensor on each rank. The result will overwrite this tensor after communication. Support
            float16, float32, float64, int32, int64, int8, uint8 or bool as the input data type.
        tensor_or_tensor_list (Union[Tensor, List[Tensor]]): The input to scatter. It defaults to `None`, but the implementation
            requires it to be specified on every rank; only the value on the source rank is used. If it is a tensor, it should be
            correctly-sized. If it is a list, it should contain correctly-sized tensors.
        src (int, optional): Rank of the source device. If none is given, use `0` as default.
        group (Group, optional): Communicate in which group. If none is given, use the global group as default.
        sync_op (bool, optional): Indicate whether the communication is sync or not. If none is given, use true as default.
        use_calc_stream (bool, optional): Indicate whether the communication is done on calculation stream. If none is given, use false as default. This
            option is designed for high-performance demands; only turn it on when you clearly understand its meaning.

    Returns:
        Return a task object.

    Warning:
        This API only supports the dygraph mode now.

    Examples:
        .. code-block:: python

            # required: distributed
            import paddle
            import paddle.distributed as dist

            dist.init_parallel_env()
            if dist.get_rank() == 0:
                data1 = paddle.to_tensor([7, 8, 9])
                data2 = paddle.to_tensor([10, 11, 12])
                dist.stream.scatter(data1, [data1, data2], src=1)
            else:
                data1 = paddle.to_tensor([1, 2, 3])
                data2 = paddle.to_tensor([4, 5, 6])
                dist.stream.scatter(data1, [data1, data2], src=1)
            out = data1.numpy()
            # [1, 2, 3] (2 GPUs, out for rank 0)
            # [4, 5, 6] (2 GPUs, out for rank 1)
    """
    if group is not None and not group.is_member():
        raise RuntimeError(
            "The group should not be None and all ranks which invoke this operation should be the member of this group."
        )
    if not sync_op and use_calc_stream:
        raise RuntimeError("use_calc_stream can only be true in sync op behavior.")
    if tensor_or_tensor_list is None:
        raise RuntimeError("The input should be specified.")

    if framework.in_dygraph_mode():
        # Dispatch on the input form: a single tensor is split along dim 0,
        # while a tensor list is scattered element-wise.
        if paddle.is_tensor(tensor_or_tensor_list):
            return _scatter_tensor_in_dygraph(
                tensor, tensor_or_tensor_list, src, group, sync_op, use_calc_stream
            )
        return _scatter_in_dygraph(
            tensor, tensor_or_tensor_list, src, group, sync_op, use_calc_stream
        )

    raise RuntimeError(
        "paddle.distributed.stream.scatter is only supported in dygraph mode now."
    )
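

# A minimal usage sketch (editorial illustration, not part of the upstream
# module): it exercises both input forms accepted by `scatter` above. It
# assumes a multi-GPU run launched with, e.g.,
# `python -m paddle.distributed.launch --gpus=0,1 scatter_demo.py`, where
# `scatter_demo.py` is a hypothetical script containing this block.
if __name__ == "__main__":
    import paddle.distributed as dist

    dist.init_parallel_env()
    nranks = dist.get_world_size()

    # Tensor form: dim 0 of the source tensor is split evenly, so every rank
    # receives a [2, 3] slice of the [2 * nranks, 3] input (the contract
    # enforced by _check_tensor_shape).
    full = paddle.arange(nranks * 6, dtype="float32").reshape([nranks * 2, 3])
    out = paddle.zeros([2, 3], dtype="float32")
    scatter(out, full, src=0)
    print("tensor form, rank", dist.get_rank(), out.numpy())

    # List form: the source rank supplies one correctly-sized tensor per rank
    # (the contract enforced by _check_tensor_list_shape).
    parts = [paddle.full([2, 3], float(i)) for i in range(nranks)]
    scatter(out, parts, src=0)
    print("list form, rank", dist.get_rank(), out.numpy())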