
import contextlib
import warnings
from collections import deque
from dataclasses import dataclass
from typing import (
    Any,
    Deque,
    Dict,
    List,
    Optional,
    Protocol,
    Sequence,
    Set,
    Tuple,
    Type,
    Union,
    overload,
)

from typing_extensions import TypeGuard

import torch
import torchgen
import torchgen.model
from torch._C import (
    DispatchKey,
    _get_dispatch_stack_at,
    _len_torch_dispatch_stack,
    _pop_torch_dispatch_stack,
    _push_on_torch_dispatch_stack,
)


_is_in_torch_dispatch_mode = False
_is_in_non_infra_torch_dispatch_mode = False


def is_in_torch_dispatch_mode(include_infra_modes=True) -> bool:
    return (
        _is_in_torch_dispatch_mode
        if include_infra_modes
        else _is_in_non_infra_torch_dispatch_mode
    )


class TorchDispatchMode:
    """
    A ``TorchDispatchMode`` allows you to override the meaning of all
    ``__torch_dispatch__`` overrideable functions within a dynamic scope,
    without having to actually create a tensor subclass or manually
    monkey-patch functions in the PyTorch API.  Some common situations
    where you should use a mode:

        * You want to override the meaning of factory functions, or other
          functions that do not otherwise take a tensor as an argument
          (these cannot be overridden with tensor subclasses).

        * You want to override the behavior of all functions without needing
          to wrap your inputs in tensor subclasses; e.g., if you are just
          interested in logging intermediate computations.

        * You want to control the order of execution of various tensor
          subclasses explicitly, rather than implicitly via the return of
          ``NotImplemented``.

    Independent subclasses of :class:`TorchDispatchMode` are compositional:
    modes can be pushed onto a stack using ``with MyMode():``.
    When you call functions in the PyTorch API inside your
    ``__torch_dispatch__`` implementation, by default, they will forward on to
    the next mode on the mode stack.  If you want to recursively call back into
    your current ``__torch_dispatch__`` implementation, either explicitly
    invoke ``self.__torch_dispatch__(...)``, or use the context manager
    ``__torch_dispatch__(self)`` to make PyTorch
    API self-referential (beware of infinite loops, in this case!)
    """

    def __init__(self, _dispatch_key=None):
        if _dispatch_key is not None:
            assert isinstance(_dispatch_key, torch._C.DispatchKey)
            self.__dict__["_dispatch_key"] = _dispatch_key

        self.old_dispatch_mode_flags: Deque[bool] = deque()
        self.old_non_infra_dispatch_mode_flags: Deque[bool] = deque()

    def _lazy_init_old_dispatch_mode_flags(self):
        # Modes created before these flags existed may not have called
        # super().__init__(), so initialize the flag stacks lazily.
        if not hasattr(self, "old_dispatch_mode_flags"):
            self.old_dispatch_mode_flags: Deque[bool] = deque()  # type: ignore[no-redef]
        if not hasattr(self, "old_non_infra_dispatch_mode_flags"):
            self.old_non_infra_dispatch_mode_flags: Deque[bool] = deque()  # type: ignore[no-redef]

    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
        raise NotImplementedError()

    def __enter__(self):
        global _is_in_torch_dispatch_mode
        global _is_in_non_infra_torch_dispatch_mode
        self._lazy_init_old_dispatch_mode_flags()
        self.old_dispatch_mode_flags.append(_is_in_torch_dispatch_mode)
        _is_in_torch_dispatch_mode = True
        self.old_non_infra_dispatch_mode_flags.append(
            _is_in_non_infra_torch_dispatch_mode
        )
        _is_in_non_infra_torch_dispatch_mode = (
            _is_in_non_infra_torch_dispatch_mode or not self.is_infra_mode()
        )
        _push_mode(self)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        mb_dk_or_mode_key = self.__dict__.get("_dispatch_key", None)
        if mb_dk_or_mode_key is None:
            # Today, mode keys are not used at all in the per-dispatch-key-mode
            # logic (for pre-dispatch); we should probably revisit this.
            mb_dk_or_mode_key = self.__dict__.get("_mode_key", None)
        global _is_in_torch_dispatch_mode
        _is_in_torch_dispatch_mode = self.old_dispatch_mode_flags.pop()
        global _is_in_non_infra_torch_dispatch_mode
        _is_in_non_infra_torch_dispatch_mode = (
            self.old_non_infra_dispatch_mode_flags.pop()
        )
        _pop_mode(mb_dk_or_mode_key)

    @classmethod
    def push(cls, *args, **kwargs):
        warnings.warn(
            "`Mode.push()` is no longer necessary and can be replaced with just `with Mode()`"
        )
        instance = cls(*args, **kwargs)
        return instance

    @classmethod
    def is_infra_mode(cls):
        return False


def _get_current_dispatch_mode():
    stack_len = _len_torch_dispatch_stack()
    # Return a user mode on the stack if there are any
    if stack_len > 0:
        return _get_dispatch_stack_at(stack_len - 1)
    return None


def _detect_infra_mode(key):
    assert key in [
        torch._C._TorchDispatchModeKey.FUNCTIONAL,
        torch._C._TorchDispatchModeKey.PROXY,
    ]
    from torch._ops import _get_dispatch_mode_pre_dispatch

    pre_dispatch_mode = _get_dispatch_mode_pre_dispatch(key)
    post_dispatch_mode = torch._C._get_dispatch_mode(key)

    assert (pre_dispatch_mode is None) or (post_dispatch_mode is None)

    if pre_dispatch_mode is None:
        return post_dispatch_mode
    return pre_dispatch_mode


def _unset_infra_mode(key):
    from torch._ops import _get_dispatch_mode_pre_dispatch, unset_mode_pre_dispatch

    pre_dispatch_mode = _get_dispatch_mode_pre_dispatch(key)
    post_dispatch_mode = torch._C._get_dispatch_mode(key)
    if pre_dispatch_mode and post_dispatch_mode:
        raise AssertionError(
            "Can't have active infra mode on both pre and post dispatch mode stack"
        )

    if pre_dispatch_mode:
        mode = unset_mode_pre_dispatch(key)
        return mode
    if post_dispatch_mode:
        return torch._C._unset_dispatch_mode(key)


@contextlib.contextmanager
def _disable_infra_mode(key):
    assert key in (
        torch._C._TorchDispatchModeKey.FUNCTIONAL,
        torch._C._TorchDispatchModeKey.PROXY,
    )
    mode_unset = _unset_infra_mode(key)
    try:
        yield mode_unset
    finally:
        if mode_unset is not None:
            _push_mode(mode_unset)


def _get_current_dispatch_mode_stack():
    stack_len = _len_torch_dispatch_stack()
    return [_get_dispatch_stack_at(i) for i in range(stack_len)]


def _push_mode(mode: TorchDispatchMode):
    k = mode._dispatch_key if hasattr(mode, "_dispatch_key") else None
    assert k is None or k == torch._C.DispatchKey.PreDispatch
    if k is None:
        _push_on_torch_dispatch_stack(mode)
        return

    from torch._ops import _set_mode_pre_dispatch, get_cached_ops

    # See Note [Not Caching Per-Dispatch-Key Mode Handlers]:
    # clear the cache of every op that has been used so far, for this key.
    ks = torch._C._functionality_to_backend_keys(k)
    for op in get_cached_ops():
        for key in ks:
            op._uncache_dispatch(key)
    _set_mode_pre_dispatch(mode)


def _pop_mode(k: Optional[Union[DispatchKey, torch._C._TorchDispatchModeKey]] = None):
    if k == torch._C.DispatchKey.PreDispatch:  # type: ignore[attr-defined]
        from torch._ops import _pop_mode_from_pre_dispatch

        return _pop_mode_from_pre_dispatch()

    if k is None or isinstance(k, torch._C._TorchDispatchModeKey):
        return _pop_torch_dispatch_stack(k)


@contextlib.contextmanager
def _pop_mode_temporarily(k: Optional[DispatchKey] = None):
    old = _pop_mode(k)
    try:
        yield old
    finally:
        _push_mode(old)


@contextlib.contextmanager
def _disable_current_modes():
    from torch._ops import (
        _len_torch_dispatch_stack_pre_dispatch,
        _pop_mode_from_pre_dispatch,
    )
    from torch._subclasses.functional_tensor import FunctionalTensorMode
    from torch._subclasses.schema_check_mode import SchemaCheckMode
    from torch.fx.experimental.proxy_tensor import ProxyTorchDispatchMode

    mode_len_pre_dispatch = _len_torch_dispatch_stack_pre_dispatch()
    old_pre_dispatch_modes = [
        _pop_mode_from_pre_dispatch() for _ in range(mode_len_pre_dispatch)
    ]

    has_proxy_mode_in_pre_dispatch = False
    has_functional_mode_in_pre_dispatch = False
    has_schema_check_mode_in_pre_dispatch = False

    for i in old_pre_dispatch_modes:
        if isinstance(i, ProxyTorchDispatchMode):
            has_proxy_mode_in_pre_dispatch = True
        if isinstance(i, FunctionalTensorMode):
            has_functional_mode_in_pre_dispatch = True
        if isinstance(i, SchemaCheckMode):
            has_schema_check_mode_in_pre_dispatch = True

    mode_len = _len_torch_dispatch_stack()
    old_modes = [_pop_mode() for _ in range(mode_len)]

    for old in old_modes:
        if (
            isinstance(old, FunctionalTensorMode)
            and has_functional_mode_in_pre_dispatch
        ):
            raise AssertionError(
                "Can't have FunctionalMode available both in PreDispatch and Python Key"
            )
        if isinstance(old, ProxyTorchDispatchMode) and has_proxy_mode_in_pre_dispatch:
            raise AssertionError(
                "Can't have ProxyTorchDispatchMode available both in PreDispatch and Python Key"
            )
        if (
            isinstance(old, SchemaCheckMode)
            and has_schema_check_mode_in_pre_dispatch
        ):
            raise AssertionError(
                "Can't have SchemaCheckMode available both in PreDispatch and Python Key"
            )

    # All modes are disabled within this block and restored on exit.
    try:
        yield old_pre_dispatch_modes + old_modes
    finally:
        for mode in reversed(old_modes):
            _push_mode(mode)
        for mode in reversed(old_pre_dispatch_modes):
            _push_mode(mode)


class BaseTorchDispatchMode(TorchDispatchMode):
    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
        if kwargs is None:
            kwargs = {}
        return func(*args, **kwargs)


class TensorWithFlatten(Protocol):
    def __tensor_flatten__(self) -> Tuple[Sequence[str], object]:
        ...

    @staticmethod
    def __tensor_unflatten__(
        inner_tensors: int, flatten_spec: int, outer_size: int, outer_stride: int
    ) -> torch.Tensor:
        ...

    # It would be nice to say that is_traceable_wrapper_subclass() returns
    # Intersection[torch.Tensor, TensorWithFlatten] - but that doesn't exist.

    shape: torch._C.Size

    @overload
    def stride(self, dim: None = None) -> Tuple[int, ...]:
        ...

    @overload
    def stride(self, dim: int) -> int:
        ...

    def dim(self) -> int:
        ...

    @overload
    def to(
        self,
        dtype: torch.types._dtype,
        non_blocking: bool = False,
        copy: bool = False,
        *,
        memory_format: Optional[torch.memory_format] = None,
    ) -> torch.Tensor:
        ...

    @overload
    def to(
        self,
        device: Optional["torch._prims_common.DeviceLikeType"] = None,
        dtype: Optional[torch.types._dtype] = None,
        non_blocking: bool = False,
        copy: bool = False,
        *,
        memory_format: Optional[torch.memory_format] = None,
    ) -> torch.Tensor:
        ...

    @overload
    def to(
        self,
        other: torch.Tensor,
        non_blocking: bool = False,
        copy: bool = False,
        *,
        memory_format: Optional[torch.memory_format] = None,
    ) -> torch.Tensor:
        ...


def is_traceable_wrapper_subclass(t: object) -> TypeGuard[TensorWithFlatten]:
    """
    Returns whether or not a tensor subclass that implements __torch_dispatch__
    is 'traceable' with torch.compile.
    In order for a tensor subclass to support TorchDispatchMode-style tracing in PT2,
    it must implement two magic methods: __tensor_flatten__ and __tensor_unflatten__.
    It is also expected to obey some restrictions around traceability and aliasing:
        * The subclass's __torch_dispatch__() implementation should desugar into pytorch
            dispatcher operations that can be traced into a graph.
        * The subclass should use return_and_correct_aliasing(). This is needed today to make
            sure that torch.compile does the right thing in a few cases around input mutation
            and output aliasing.

    Expected magic method signatures:
        attrs, ctx = t.__tensor_flatten__()
            attrs: list of attribute name strings for inner tensors
            ctx: dict containing any other subclass-specific metadata needed for unflattening

        t = MySubClass.__tensor_unflatten__(inner_tensors, ctx, outer_size, outer_stride)
            inner_tensors: dict mapping attribute name -> tensor for each inner tensor
            ctx: dict with subclass metadata in the form that __tensor_flatten__() produces
            outer_size: expected (possibly symbolic) size that the returned subclass
                instance should have. Note that this arg is useful for certain subclasses
                that require the shape info to be constructed. In most cases, this arg can be
                safely ignored.
            outer_stride: expected (possibly symbolic) stride that the returned subclass
                instance should have. Note that this arg is useful for certain subclasses
                that require the stride info to be constructed. In most cases, this arg can be
                safely ignored.
    """
    is_subclass = isinstance(t, torch.Tensor) and type(t) != torch.Tensor
    return (
        is_subclass
        and hasattr(t, "__tensor_flatten__")
        and hasattr(t, "__tensor_unflatten__")
    )


def is_traceable_wrapper_subclass_type(t: Type) -> TypeGuard[Type[TensorWithFlatten]]:
    """Same as above, but takes a type argument instead of an instance."""
    return (
        issubclass(t, torch.Tensor)
        and t != torch.Tensor
        and hasattr(t, "__tensor_flatten__")
        and hasattr(t, "__tensor_unflatten__")
    )


def transform_subclass(t, callback, outer_size=None, outer_stride=None):
    """
    Given a traceable, wrapper tensor subclass ``t`` that implements
    ``__torch_dispatch__`` and holds some inner tensors,
    and a callback of type ``Callable[[str, torch.Tensor], torch.Tensor]``,
    `transform_subclass` will construct a fresh instance of the wrapper tensor subclass.
    It will do so by grabbing each inner tensor attribute from the wrapper,
    passing them into ``callback`` to get a transformed tensor,
    and putting each transformed tensor into the fresh tensor subclass instance.

    Note: this function will not handle ensuring that the fresh subclass
    gets the same (autograd, and aliasing) metadata as the original tensor.
    This is generally handled in other subsystems like AOTAutograd.
    """
    outer_size = outer_size if outer_size is not None else t.size()
    outer_stride = outer_stride if outer_stride is not None else t.stride()

    attrs, ctx = t.__tensor_flatten__()
    transformed_tensors_dict = {}
    for attr in attrs:
        transformed_tensors_dict[attr] = callback(attr, getattr(t, attr))
    sub = type(t).__tensor_unflatten__(
        transformed_tensors_dict, ctx, outer_size, outer_stride
    )

    # NB: purposefully guard here to simplify the inner / outer symbols;
    # symbolic comparison via sym_eq() can produce expressions that are too
    # difficult to guard on, so plain == is used.
    assert sub.shape == outer_size, (
        f"Expected return value from {type(t)}__tensor_unflatten__() to have "
        f"shape equal to {outer_size}, but got: {sub.shape}"
    )
    assert sub.stride() == outer_stride, (
        f"Expected return value from {type(t)}__tensor_unflatten__() to have "
        f"stride equal to {outer_stride}, but got: {sub.stride()}"
    )

    return sub


def _correct_storage_aliasing(func, schema_info, args, outs):
    """
    Given: an OpOverload, a SchemaInfo (cached information from torchgen about schema),
    and the inputs/outputs to the OpOverload,
    this function checks to see if func is a view operator
    (by checking if any of the outputs in the op's schema
     are immutable aliases of inputs).
    If so, this function manually aliases the storage of the output tensor
    with its corresponding input tensor alias.
    It does this by unsafely overwriting the storage field of the output tensor
    to be the same storage as the input.
    """
    assert isinstance(func, torch._ops.OpOverload)
    assert isinstance(args, tuple)
    assert isinstance(outs, (list, tuple))
    flat_outs = torch.utils._pytree.tree_leaves(outs)

    def alias_non_inplace_storage(arg, ret):
        # This is hopefully a reasonable assert: subclasses that rely on this
        # API for output aliasing should always return wrapper tensor
        # subclasses for us to manually alias.
        if is_traceable_wrapper_subclass(arg) or is_traceable_wrapper_subclass(ret):
            ret_list = ret if isinstance(ret, list) else [ret]
            for r in ret_list:
                assert type(arg) == type(r), (
                    f"Called {str(func)} with input of type {type(arg)}\n"
                    f"and output of type {type(ret)}. But expected types to match."
                )
        # Swap out the storage directly instead of going through set_(), so
        # that a subclass cannot intercept the call, and so that the output's
        # size/stride metadata (assumed to already be correct) is untouched.
        if isinstance(ret, list):
            for r in ret:
                torch._functionalize_unsafe_set(r, arg)
        else:
            assert isinstance(ret, torch.Tensor), f"type: {type(ret)}"
            torch._functionalize_unsafe_set(ret, arg)

    def is_read_only_alias_match(arg, ret):
        shared_aliases = arg.alias_set & ret.alias_set
        return len(shared_aliases) > 0 and not arg.is_write

    num_args = len(func._schema.arguments)
    num_returns = len(func._schema.returns)
    for arg_idx in range(num_args):
        for return_idx in range(num_returns):
            if is_read_only_alias_match(
                schema_info.args[arg_idx], schema_info.outs[return_idx]
            ):
                alias_non_inplace_storage(args[arg_idx], outs[return_idx])


# This abstracts over the fact that in return_and_correct_aliasing we
# sometimes use torchgen schema parsing (for aten ops, since torchscript's
# schema parsing is sometimes buggy), and sometimes use torchscript schema
# parsing (for custom ops, for which torchgen parsing is untested).
@dataclass
class AliasInfo:
    alias_set: Set[str]
    is_write: bool
    name: Optional[str]


@dataclass
class SchemaInfo:
    args: List[AliasInfo]
    outs: List[AliasInfo]


# Can't annotate with torch._ops.OpOverload due to a circular import.
parsed_schema_map: Dict[Any, SchemaInfo] = {}


# Given an OpOverload, returns schema information on it.
# This is cached for efficiency, since parsing can involve running torchgen.
def get_alias_info(func) -> SchemaInfo:
    if func in parsed_schema_map:
        return parsed_schema_map[func]
    # For ATen ops: use torchgen (since the torchscript parser doesn't handle
    # alias annotations properly for some ops that output tensorlists).
    if func.namespace == "aten":
        torchgen_schema_str = str(func._schema)
        assert torchgen_schema_str.startswith("aten::")
        # Remove the aten:: namespace, which torchgen doesn't know how to handle.
        torchgen_schema_str = torchgen_schema_str[6:]
        import re

        # The torchscript parser ends up converting int[2]=1 into int[2]=[1, 1],
        # which torchgen chokes on.
        torchgen_schema_str = re.sub(r"=\[[0, ]+\]", "=0", torchgen_schema_str)
        torchgen_schema_str = re.sub(r"=\[[1, ]+\]", "=1", torchgen_schema_str)
        # For aten::rot90.
        torchgen_schema_str = torchgen_schema_str.replace("=[0, 1]", "=[0,1]")
        torchgen_schema = torchgen.model.FunctionSchema.parse(torchgen_schema_str)
        arg_schemas = [
            AliasInfo(
                alias_set=set()
                if a.annotation is None
                else set(a.annotation.alias_set),
                is_write=a.annotation is not None and a.annotation.is_write,
                name=a.name,
            )
            for a in torchgen_schema.arguments.flat_all
        ]
        out_schemas = [
            AliasInfo(
                alias_set=set()
                if a.annotation is None
                else set(a.annotation.alias_set),
                is_write=a.annotation is not None and a.annotation.is_write,
                name=a.name,
            )
            for a in torchgen_schema.returns
        ]
    else:
        # For non-aten ops, torchgen is untested, so rely on torchscript
        # schema parsing instead.
        arg_schemas = [
            AliasInfo(
                alias_set=set()
                if a.alias_info is None
                else set(a.alias_info.before_set),
                is_write=a.alias_info is not None and a.alias_info.is_write,
                name=a.name,
            )
            for a in func._schema.arguments
        ]
        out_schemas = [
            AliasInfo(
                alias_set=set()
                if a.alias_info is None
                else set(a.alias_info.before_set),
                is_write=a.alias_info is not None and a.alias_info.is_write,
                name=a.name,
            )
            for a in func._schema.returns
        ]
    schema_info = SchemaInfo(args=arg_schemas, outs=out_schemas)
    parsed_schema_map[func] = schema_info
    return schema_info


def return_and_correct_aliasing(func, args, kwargs, out):
    """
    This function should be used by wrapper tensor ``__torch_dispatch__`` subclasses
    that would like to work with torch.compile. It ensures that the subclass
    properly implements the aliasing behavior of every op,
    which is needed for correctness in AOTAutograd.
    This function will handle:

        * When we see a view op, we will alias the storages of any
          input and output tensor subclasses

        * When we see an inplace or out= op, we will directly
          return the corresponding input tensor, instead of returning
          a (potentially) fresh output tensor.
    """
    # Cached because torchgen parsing is definitely not fast, and this
    # function is called once for every op during functionalization.
    schema_info = get_alias_info(func)

    def get_write_alias(x):
        if len(x.alias_set) == 0:
            return None
        alias_set = list(x.alias_set)
        # torchscript allows for complicated alias sets, but our dispatcher
        # ops only really involve simple aliasing.
        assert len(alias_set) == 1
        if x.is_write:
            return alias_set[0]
        return None

    def get_arg_from_alias(output_alias, schema_info, args, kwargs):
        new_args, new_kwargs = torch.fx.operator_schemas.normalize_function(  # type: ignore[misc]
            func, args=args, kwargs=kwargs
        )

        arg_indices = [
            i for i, a in enumerate(schema_info.args) if output_alias in a.alias_set
        ]
        # For any dispatcher op with an output alias, we expect it to map to
        # exactly one alias in the schema's input arguments.
        assert len(arg_indices) == 1
        idx = arg_indices[0]
        arg_info = schema_info.args[idx]
        if arg_info.name is not None and arg_info.name in new_kwargs:
            return new_kwargs[arg_info.name]
        return new_args[idx]

    # Fix up the storages of any outs so that they point to the same storage
    # as the input, if func is a view op.
    _correct_storage_aliasing(
        func, schema_info, args, (out,) if not isinstance(out, tuple) else out
    )

    # For inplace_view ops in particular, we'll try hard to make sure that
    # the wrapper subclass's metadata is set correctly.
    if torch.Tag.inplace_view in func.tags:
        # no_dispatch() to make sure that we secretly change the metadata on
        # the wrapper, but don't end up dispatching the op anywhere else.
        mutated_args = [
            x
            for i, x in enumerate(args)
            if get_write_alias(schema_info.args[i]) is not None
        ]
        # Assumption: the small number of inplace_view ops all follow a strict
        # schema where only a single argument gets its metadata mutated.
        assert len(mutated_args) == 1
        # We generally *do* want to update the metadata of any wrapper
        # subclasses, but FunctionalTensor is special: it overrides all
        # size/stride calls to plumb to the inner tensor, so it doesn't need
        # the update (and attempting it causes errors).
        from torch._subclasses.functional_tensor import FunctionalTensor

        if not isinstance(mutated_args[0], FunctionalTensor):
            with torch.utils._mode_utils.no_dispatch():
                # See Note: [Fake Tensor Dispatch Keys]: we're borrowing the
                # way FakeTensor modifies dispatch key TLS.
                meta_in_tls = torch._C._meta_in_tls_dispatch_include()
                torch._C._set_meta_in_tls_dispatch_include(True)
                try:
                    func(*args, **kwargs)
                finally:
                    torch._C._set_meta_in_tls_dispatch_include(meta_in_tls)

    # Next: return inputs directly whenever an output is a mutable alias
    # (e.g. add_()).

    # Simple case: none of the outputs have mutable aliases, so the output
    # can be returned as-is.
    if not any(get_write_alias(r) is not None for r in schema_info.outs):
        return out

    # Simplifying assumption: we don't have **any** ops with return types
    # like "-> (Tensor(a!), Tensor)".
    if not all(get_write_alias(r) is not None for r in schema_info.outs):
        raise RuntimeError("Unsupported schema: " + str(func._schema))

    if len(func._schema.returns) == 1:
        return get_arg_from_alias(
            get_write_alias(schema_info.outs[0]), schema_info, args, kwargs
        )

    # In the multi-return case, all aten ops return a tuple / list,
    # so cast accordingly.
    outs_to_return = type(out)(
        [
            (
                get_arg_from_alias(
                    get_write_alias(schema_info.outs[i]), schema_info, args, kwargs
                )
                if get_write_alias(r) is not None
                else o
            )
            for ((i, r), o) in zip(enumerate(schema_info.outs), out)
        ]
    )
    return outs_to_return
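

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; ``_demo_logging_mode`` is not part of this
# module's API). It shows the ``TorchDispatchMode`` contract described in the
# class docstring above: within the mode's dynamic scope every dispatched op,
# including factory functions like ``torch.ones``, is intercepted, and calling
# ``func`` forwards to the next mode on the stack.
# ---------------------------------------------------------------------------
def _demo_logging_mode():
    class LoggingMode(TorchDispatchMode):
        def __torch_dispatch__(self, func, types, args=(), kwargs=None):
            kwargs = kwargs or {}
            print(f"dispatch: {func}")
            # Forward to the next mode on the stack (or the regular kernel
            # if this is the innermost mode).
            return func(*args, **kwargs)

    with LoggingMode():
        # Logs aten.ones.default and aten.add.Tensor, in dispatch order.
        torch.ones(2) + torch.ones(2)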
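

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; ``_WrapperDemo`` and its ``inner``
# attribute are hypothetical names, not part of torch). It exercises the
# ``__tensor_flatten__`` / ``__tensor_unflatten__`` protocol that
# ``is_traceable_wrapper_subclass`` checks for, ``transform_subclass``
# rebuilding a wrapper around transformed inner tensors, and
# ``return_and_correct_aliasing`` returning the input for in-place ops.
# ---------------------------------------------------------------------------
def _demo_wrapper_subclass():
    import torch.utils._pytree as pytree

    class _WrapperDemo(torch.Tensor):
        @staticmethod
        def __new__(cls, inner):
            return torch.Tensor._make_wrapper_subclass(
                cls, inner.shape, dtype=inner.dtype, device=inner.device
            )

        def __init__(self, inner):
            self.inner = inner

        def __tensor_flatten__(self):
            # One inner tensor attribute, no extra metadata.
            return ["inner"], None

        @staticmethod
        def __tensor_unflatten__(inner_tensors, ctx, outer_size, outer_stride):
            return _WrapperDemo(inner_tensors["inner"])

        @classmethod
        def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
            kwargs = kwargs or {}

            def unwrap(x):
                return x.inner if isinstance(x, _WrapperDemo) else x

            def wrap(x):
                return _WrapperDemo(x) if isinstance(x, torch.Tensor) else x

            raw = func(
                *pytree.tree_map(unwrap, args), **pytree.tree_map(unwrap, kwargs)
            )
            out = pytree.tree_map(wrap, raw)
            # Hand outputs back through return_and_correct_aliasing so that
            # mutable-alias ops (e.g. add_) return their input wrapper.
            return return_and_correct_aliasing(func, args, kwargs, out)

    w = _WrapperDemo(torch.randn(3))
    assert is_traceable_wrapper_subclass(w)

    # Non-aliasing op: dispatch unwraps, computes, and rewraps.
    y = w.mul(2)
    assert isinstance(y, _WrapperDemo)

    # In-place op: return_and_correct_aliasing hands back the input wrapper.
    assert w.add_(1) is w

    # transform_subclass rebuilds the wrapper around transformed inner tensors.
    w2 = transform_subclass(w, lambda attr, t: t.detach().clone())
    assert is_traceable_wrapper_subclass(w2)


if __name__ == "__main__":
    _demo_logging_mode()
    _demo_wrapper_subclass()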