# flake8: noqa: B950
from ._internal import register_artifact, register_log


DYNAMIC = [
    "torch.fx.experimental.symbolic_shapes",
    "torch.fx.experimental.sym_node",
    "torch.fx.experimental.recording",
]

DISTRIBUTED = [
    "torch.distributed",
    "torch._dynamo.backends.distributed",
    "torch.nn.parallel.distributed",
]

register_log("dynamo", ["torch._dynamo", *DYNAMIC])
register_log("fake_tensor", ["torch._subclasses.fake_tensor"])
register_log(
    "aot", ["torch._functorch.aot_autograd", "torch._functorch._aot_autograd"]
)
register_log("autograd", "torch.autograd")
register_log("inductor", ["torch._inductor", "torch._inductor.cudagraph_trees"])

register_artifact(
    "cudagraphs",
    "Logs information from wrapping inductor generated code with cudagraphs.",
)

register_log("dynamic", DYNAMIC)
register_log("torch", "torch")
register_log("distributed", DISTRIBUTED)
register_log(
    "c10d", ["torch.distributed.distributed_c10d", "torch.distributed.rendezvous"]
)
register_log(
    "ddp", ["torch.nn.parallel.distributed", "torch._dynamo.backends.distributed"]
)
register_log("pp", ["torch.distributed.pipelining"])
register_log("fsdp", ["torch.distributed.fsdp", "torch.distributed._composable.fsdp"])
register_log("dtensor", ["torch.distributed._tensor", "torch.distributed.tensor"])
register_log("onnx", "torch.onnx")
register_log(
    "export",
    [
        "torch._dynamo",
        "torch.export",
        "torch.export.dynamic_shapes",
        *DYNAMIC,
        "torch._export.converter",
        "torch._export.non_strict_utils",
    ],
)

register_artifact(
    "guards",
    "This prints the guards for every compiled Dynamo frame. It does not tell you where the guards come from.",
    visible=True,
)
register_artifact("verbose_guards", "", off_by_default=True)
register_artifact(
    "bytecode",
    "Prints the original and modified bytecode from Dynamo. Mostly useful if you're debugging our bytecode generation in Dynamo.",
    off_by_default=True,
)
register_artifact(
    "graph",
    "Prints the dynamo traced graph (prior to AOTDispatch) in a table. If you prefer python code use `graph_code` instead.",
)
register_artifact("graph_code", "Like `graph`, but gives you the Python code instead.")
register_artifact(
    "graph_sizes", "Prints the sizes of all FX nodes in the dynamo graph."
)
register_artifact(
    "trace_source",
    "As we execute bytecode, prints the file name / line number we are processing and the actual source code. Useful with `bytecode`",
)
register_artifact(
    "trace_call",
    "Like trace_source, but it will give you the per-expression blow-by-blow if your Python is recent enough.",
)
register_artifact(
    "trace_bytecode",
    "As we trace bytecode, prints the instruction and the current stack.",
)
register_artifact(
    "aot_graphs",
    "Prints the FX forward and backward graph generated by AOTDispatch, after partitioning. Useful to understand what's being given to Inductor",
    visible=True,
)
register_artifact(
    "aot_joint_graph",
    "Print FX joint graph from AOTAutograd, prior to partitioning. Useful for debugging partitioning",
)
register_artifact(
    "aot_graphs_effects",
    "Prints the FX forward and backward graph generated by AOTDispatch, useful for debugging effects processing.",
    visible=True,
)
register_artifact(
    "post_grad_graphs",
    "Prints the FX graph generated by post grad passes. Useful to understand what's being given to Inductor after post grad passes",
)
register_artifact(
    "compiled_autograd",
    "Prints various logs in compiled_autograd, including but not limited to the graphs. Useful for debugging compiled_autograd.",
    visible=True,
)
register_artifact(
    "compiled_autograd_verbose",
    "Will affect performance. Prints compiled_autograd logs with C++ info e.g. autograd node -> fx node mapping",
    off_by_default=True,
)
register_artifact(
    "ddp_graphs",
    "Only relevant for compiling DDP. DDP splits into multiple graphs to trigger comms early. This will print each individual graph here.",
)
register_artifact(
    "recompiles",
    "Prints the reason why we recompiled a graph. Very, very useful.",
    visible=True,
)
register_artifact(
    "recompiles_verbose",
    "Prints all guard checks that fail during a recompilation. "
    "At runtime, Dynamo will stop at the first failed check for each failing guard. "
    "So not all logged failing checks are actually run by Dynamo.",
    visible=True,
    off_by_default=True,
)
register_artifact(
    "graph_breaks",
    "Prints whenever Dynamo decides that it needs to graph break (i.e. create a new graph). Useful for debugging why torch.compile has poor performance",
    visible=True,
)
register_artifact(
    "not_implemented",
    "Prints log messages whenever we return NotImplemented in a multi-dispatch, letting you trace through each object we attempted to dispatch to",
)
register_artifact(
    "output_code",
    "Prints the code that Inductor generates (either Triton or C++)",
    off_by_default=True,
    visible=True,
)
register_artifact(
    "kernel_code",
    "Prints the code that Inductor generates (on a per-kernel basis)",
    off_by_default=True,
    visible=True,
)
register_artifact(
    "schedule",
    "Inductor scheduler information. Useful if working on Inductor fusion algo",
    off_by_default=True,
)
register_artifact("perf_hints", "", off_by_default=True)
register_artifact("onnx_diagnostics", "", off_by_default=True)
register_artifact(
    "fusion",
    "Detailed Inductor fusion decisions. More detailed than 'schedule'",
    off_by_default=True,
)
register_artifact(
    "loop_ordering",
    "Logs related to loop ordering",
    off_by_default=True,
)
register_artifact(
    "overlap",
    "Detailed Inductor compute/comm overlap decisions",
    off_by_default=True,
)
register_artifact(
    "sym_node",
    "Logs extra info for various SymNode operations",
    off_by_default=True,
)
register_artifact(
    "trace_shape_events",
    "Logs traces for every ShapeEnv operation that we record for replay",
    off_by_default=True,
)
register_artifact(
    "cudagraph_static_inputs",
    "Logs static inputs handling in dynamo, AOT, and cudagraphs",
    off_by_default=True,
)
register_artifact(
    "benchmarking",
    "Detailed Inductor benchmarking information.",
    off_by_default=True,
)

register_artifact("custom_format_test_artifact", "Testing only", log_format="")
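
# Usage sketch (illustrative addition, not part of the registration table
# above): the names registered here become valid targets for the TORCH_LOGS
# environment variable, e.g. TORCH_LOGS="+dynamo,graph_breaks", and for the
# torch._logging.set_logs() Python API. The helper below is hypothetical and
# never called by this module; it only demonstrates how a user would consume
# these registrations.
def _example_enable_logs() -> None:
    """Hypothetical example: enable DEBUG-level dynamo logs and the
    `graph_breaks` artifact registered above. Log names (e.g. "dynamo")
    take logging levels; artifact names (e.g. "graph_breaks") are toggled
    with booleans."""
    import logging

    import torch

    torch._logging.set_logs(dynamo=logging.DEBUG, graph_breaks=True)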
 X2'2 3 ]<= > U46VW X Z) * Z+-NO P M
 Y   Wg  ]K ( 
13QR 	+-QR T23 4 V.0TU V Y46PQ R V\ " % 
	
 	" 	)
 n
 "Bt <  B
 | , V W J  F n I  Q
 e q
  D  A
 p
  K E
 C   Y
  S D	 E	 O
 ,4 8 $b > G
 #
 6
 4
 H
 @
 1 /B Or@   